#coding:utf-8
#
# id:           bugs.core_5085
# title:        Allow to fixup (nbackup) database via Services API
# description:
#               Checked on 4.0.0.2119: OK.
#
# tracker_id:   CORE-5085
# min_versions: ['4.0']
# versions:     4.0
# qmid:

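# Note: the Services API fixup exercised below corresponds to the command-line call
#   fbsvcmgr service_mgr action_nfix dbname <database>
# (the same call used by the legacy test script preserved in the commented block further down).
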
import pytest
from firebird.qa import db_factory, python_act, Action

# version: 4.0
# resources: None

substitutions_1 = []

init_script_1 = """"""

db_1 = db_factory(sql_dialect=3, init=init_script_1)

# test_script_1
#---
#
#  import os
#  import time
#  import subprocess
#
#  os.environ["ISC_USER"] = user_name
#  os.environ["ISC_PASSWORD"] = user_password
#
#  db_source = db_conn.database_name
#  db_delta = db_source +'.delta'
#  nbk_level_0 = os.path.splitext(db_source)[0] + '.nbk00'
#  #'$(DATABASE_LOCATION)tmp_core_5085.nbk_00'
#
#  db_conn.close()
#
#  #--------------------------------------------
#
#  def flush_and_close(file_handle):
#      # https://docs.python.org/2/library/os.html#os.fsync
#      # If you're starting with a Python file object f,
#      # first do f.flush(), and
#      # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
#      global os
#
#      file_handle.flush()
#      if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
#          # otherwise: "OSError: [Errno 9] Bad file descriptor"!
#          os.fsync(file_handle.fileno())
#      file_handle.close()
#
#  #--------------------------------------------
#
#  def cleanup( f_names_list ):
#      global os
#      for i in range(len( f_names_list )):
#          if type(f_names_list[i]) == file:
#              del_name = f_names_list[i].name
#          elif type(f_names_list[i]) == str:
#              del_name = f_names_list[i]
#          else:
#              print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
#              del_name = None
#
#          if del_name and os.path.isfile( del_name ):
#              os.remove( del_name )
#
#  #--------------------------------------------
#
#  cleanup( ( db_delta, nbk_level_0, ) )
#
#  # 1. Create standby copy: make clone of source DB using nbackup -b 0:
#  ########################
#  f_nbk0_log=open( os.path.join(context['temp_directory'],'tmp_nbk0_5085.log'), 'w')
#  f_nbk0_err=open( os.path.join(context['temp_directory'],'tmp_nbk0_5085.err'), 'w')
#
#  subprocess.call( [context['nbackup_path'], '-L', db_source], stdout=f_nbk0_log, stderr=f_nbk0_err )
#  subprocess.call( [context['fbsvcmgr_path'], 'service_mgr', 'action_nfix', 'dbname', db_source], stdout=f_nbk0_log, stderr=f_nbk0_err )
#
#  flush_and_close( f_nbk0_log )
#  flush_and_close( f_nbk0_err )
#
#  # test connect to ensure that all OK after fixup:
#  ##############
#  con=fdb.connect(dsn = dsn)
#  cur=con.cursor()
#  cur.execute('select mon$backup_state from mon$database')
#  for r in cur:
#      print(r[0])
#  cur.close()
#  con.close()
#
#  # Check. All of these files must be empty:
#  ###################################
#  f_list=(f_nbk0_log, f_nbk0_err)
#  for i in range(len(f_list)):
#      with open( f_list[i].name,'r') as f:
#          for line in f:
#              if line.split():
#                  print( 'UNEXPECTED output in file '+f_list[i].name+': '+line.upper() )
#
#  # Cleanup.
#  ##########
#  time.sleep(1)
#  cleanup( (f_nbk0_log,f_nbk0_err,db_delta, nbk_level_0) )
#
#
#---

act_1 = python_act('db_1', substitutions=substitutions_1)

#expected_stdout_1 = """
#0
#"""

@pytest.mark.version('>=4.0')
def test_1(act_1: Action):
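    # Step 1: switch the database to backup-lock (stalled) mode; further changes go to a delta file.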
    act_1.nbackup(switches=['-l', str(act_1.db.db_path)])
    # Calling srv.database.nfix_database() raises an error in the new FB OO API while
    # calling spb.insert_string(SPBItem.DBNAME, database):
    #   "Internal error when using clumplet API: attempt to store data in dataless clumplet"
    # so we have to use svcmgr instead:
    #with act_1.connect_server() as srv:
    #    srv.database.nfix_database(database=act_1.db.db_path)
    act_1.reset()
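    # Step 2: fix up the locked database via the Services API (action_nfix), the feature added by CORE-5085.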
    act_1.svcmgr(switches=['action_nfix', 'dbname', str(act_1.db.db_path)])
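    # Step 3: a regular connect must now succeed and mon$backup_state must be back to 0 (normal state).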
    with act_1.db.connect() as con:
        c = con.cursor()
        result = c.execute('select mon$backup_state from mon$database').fetchall()
        assert result == [(0, )]