8
0
mirror of https://github.com/FirebirdSQL/firebird.git synced 2025-02-02 09:20:39 +01:00

Parallel backup, restore and sweep.

This commit is contained in:
Vlad Khorsun 2022-05-31 20:29:36 +03:00
parent 12b5de3983
commit fa90269460
52 changed files with 6353 additions and 965 deletions

View File

@ -1057,6 +1057,25 @@
#SecurityDatabase = $(dir_secDb)/security5.fdb
# ============================
# Settings for parallel work
# ============================
#
# Limit number of parallel workers for the single task. Per-process.
# Valid values are from 1 (no parallelism) to 64. All other values are
# silently ignored and the default value of 1 is used.
#
#MaxParallelWorkers = 1
#
# Default number of parallel workers for the single task. Per-process.
# Valid values are from 1 (no parallelism) to MaxParallelWorkers (above).
# Values less than 1 are silently ignored and the default value of 1 is used.
#
#ParallelWorkers = 1
# ==============================
# Settings for Windows platforms
# ==============================

View File

@ -139,6 +139,7 @@
</ItemDefinitionGroup>
<ItemGroup>
<ClCompile Include="..\..\..\src\burp\burp.cpp" />
<ClCompile Include="..\..\..\src\burp\BurpTasks.cpp" />
<ClCompile Include="..\..\..\src\burp\canonical.cpp" />
<ClCompile Include="..\..\..\src\burp\misc.cpp" />
<ClCompile Include="..\..\..\src\burp\mvol.cpp" />
@ -154,6 +155,7 @@
<ItemGroup>
<ClInclude Include="..\..\..\src\burp\backu_proto.h" />
<ClInclude Include="..\..\..\src\burp\burp.h" />
<ClInclude Include="..\..\..\src\burp\BurpTasks.h" />
<ClInclude Include="..\..\..\src\burp\burp_proto.h" />
<ClInclude Include="..\..\..\src\burp\burpswi.h" />
<ClInclude Include="..\..\..\src\burp\canon_proto.h" />

View File

@ -39,6 +39,9 @@
<ClCompile Include="..\..\..\gen\burp\restore.cpp">
<Filter>BURP files\Generated files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\src\burp\BurpTasks.cpp">
<Filter>BURP files</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<None Include="..\..\..\src\burp\backup.epp">
@ -79,5 +82,8 @@
<ClInclude Include="..\..\..\src\burp\resto_proto.h">
<Filter>Header files</Filter>
</ClInclude>
<ClInclude Include="..\..\..\src\burp\BurpTasks.h">
<Filter>Header files</Filter>
</ClInclude>
</ItemGroup>
</Project>

View File

@ -94,6 +94,7 @@
<ClCompile Include="..\..\..\src\common\StatementMetadata.cpp" />
<ClCompile Include="..\..\..\src\common\StatusArg.cpp" />
<ClCompile Include="..\..\..\src\common\StatusHolder.cpp" />
<ClCompile Include="..\..\..\src\common\Task.cpp" />
<ClCompile Include="..\..\..\src\common\TextType.cpp" />
<ClCompile Include="..\..\..\src\common\ThreadData.cpp" />
<ClCompile Include="..\..\..\src\common\ThreadStart.cpp" />
@ -211,6 +212,7 @@
<ClInclude Include="..\..\..\src\common\StatusArg.h" />
<ClInclude Include="..\..\..\src\common\StatusHolder.h" />
<ClInclude Include="..\..\..\src\common\stuff.h" />
<ClInclude Include="..\..\..\src\common\Task.h" />
<ClInclude Include="..\..\..\src\common\TextType.h" />
<ClInclude Include="..\..\..\src\common\ThreadData.h" />
<ClInclude Include="..\..\..\src\common\ThreadStart.h" />

View File

@ -249,6 +249,9 @@
<ClCompile Include="..\..\..\src\common\Int128.cpp">
<Filter>common</Filter>
</ClCompile>
<ClCompile Include="..\..\..\src\common\Task.cpp">
<Filter>common</Filter>
</ClCompile>
<ClCompile Include="..\..\..\src\common\classes\TimerImpl.cpp">
<Filter>classes</Filter>
</ClCompile>
@ -602,6 +605,9 @@
<ClInclude Include="..\..\..\src\common\Int128.h">
<Filter>headers</Filter>
</ClInclude>
<ClInclude Include="..\..\..\src\common\Task.h">
<Filter>headers</Filter>
</ClInclude>
<ClInclude Include="..\..\..\src\common\classes\TimerImpl.h">
<Filter>headers</Filter>
</ClInclude>

View File

@ -173,6 +173,7 @@
<ClCompile Include="..\..\..\src\jrd\validation.cpp" />
<ClCompile Include="..\..\..\src\jrd\vio.cpp" />
<ClCompile Include="..\..\..\src\jrd\VirtualTable.cpp" />
<ClCompile Include="..\..\..\src\jrd\WorkerAttachment.cpp" />
<ClCompile Include="..\..\..\src\lock\lock.cpp" />
<ClCompile Include="..\..\..\src\utilities\gsec\gsec.cpp" />
<ClCompile Include="..\..\..\src\utilities\gstat\ppg.cpp" />
@ -359,6 +360,7 @@
<ClInclude Include="..\..\..\src\jrd\vio_debug.h" />
<ClInclude Include="..\..\..\src\jrd\vio_proto.h" />
<ClInclude Include="..\..\..\src\jrd\VirtualTable.h" />
<ClInclude Include="..\..\..\src\jrd\WorkerAttachment.h" />
</ItemGroup>
<ItemGroup>
<None Include="..\..\..\src\dsql\DdlNodes.epp" />

View File

@ -522,6 +522,9 @@
<ClCompile Include="..\..\..\src\jrd\optimizer\Retrieval.cpp">
<Filter>Optimizer</Filter>
</ClCompile>
<ClCompile Include="..\..\..\src\jrd\WorkerAttachment.cpp">
<Filter>JRD files</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="..\..\..\src\jrd\recsrc\RecordSource.h">
@ -1064,6 +1067,9 @@
<ClInclude Include="..\..\..\src\jrd\QualifiedName.h">
<Filter>Header files</Filter>
</ClInclude>
<ClInclude Include="..\..\..\src\jrd\WorkerAttachment.h">
<Filter>Header files</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<None Include="..\..\..\src\dsql\DdlNodes.epp">

View File

@ -272,6 +272,18 @@ int alice(Firebird::UtilSvc* uSvc)
}
}
if (table->in_sw_value & sw_parallel_workers)
{
if (--argc <= 0) { // TODO: error message!
ALICE_error(6); // msg 6: number of page buffers for cache required
}
ALICE_upper_case(*argv++, string, sizeof(string));
if ((!(tdgbl->ALICE_data.ua_parallel_workers = atoi(string))) && (strcmp(string, "0")))
{
ALICE_error(7); // msg 7: numeric value required
}
}
if (table->in_sw_value & sw_housekeeping)
{
if (--argc <= 0) {

View File

@ -93,6 +93,7 @@ struct user_action
USHORT ua_db_SQL_dialect;
alice_shut_mode ua_shutdown_mode;
alice_repl_mode ua_replica_mode;
SSHORT ua_parallel_workers;
};

View File

@ -60,6 +60,7 @@ const SINT64 sw_buffers = 0x0000000020000000L;
const SINT64 sw_mode = 0x0000000040000000L;
const SINT64 sw_set_db_dialect = 0x0000000080000000L;
const SINT64 sw_trusted_auth = QUADCONST(0x0000000100000000); // Byte 4, Bit 0
const SINT64 sw_parallel_workers= QUADCONST(0x0000000200000000);
const SINT64 sw_fetch_password = QUADCONST(0x0000000800000000);
const SINT64 sw_nolinger = QUADCONST(0x0000001000000000);
const SINT64 sw_icu = QUADCONST(0x0000002000000000);
@ -124,7 +125,8 @@ enum alice_switches
IN_SW_ALICE_NOLINGER = 47,
IN_SW_ALICE_ICU = 48,
IN_SW_ALICE_ROLE = 49,
IN_SW_ALICE_REPLICA = 50
IN_SW_ALICE_REPLICA = 50,
IN_SW_ALICE_PARALLEL_WORKERS = 51
};
static const char* const ALICE_SW_ASYNC = "ASYNC";
@ -213,6 +215,9 @@ static const Switches::in_sw_tab_t alice_in_sw_table[] =
{IN_SW_ALICE_PROMPT, 0, "PROMPT", sw_prompt,
sw_list, 0, false, false, 41, 2, NULL},
// msg 41: \t-prompt\t\tprompt for commit/rollback (-l)
{IN_SW_ALICE_PARALLEL_WORKERS, isc_spb_rpr_par_workers, "PARALLEL", sw_parallel_workers,
sw_sweep, 0, false, false, 136, 3, NULL},
// msg 136: -par(allel) parallel workers <n> (-sweep)
{IN_SW_ALICE_PASSWORD, 0, "PASSWORD", sw_password,
0, (sw_trusted_auth | sw_fetch_password),
false, false, 42, 2, NULL},

View File

@ -325,6 +325,10 @@ static void buildDpb(Firebird::ClumpletWriter& dpb, const SINT64 switches)
dpb.insertByte(isc_dpb_set_db_replica, tdgbl->ALICE_data.ua_replica_mode);
}
if (switches & sw_parallel_workers) {
dpb.insertInt(isc_dpb_parallel_workers, tdgbl->ALICE_data.ua_parallel_workers);
}
if (switches & sw_nolinger)
dpb.insertTag(isc_dpb_nolinger);

1111
src/burp/BurpTasks.cpp Normal file

File diff suppressed because it is too large Load Diff

611
src/burp/BurpTasks.h Normal file
View File

@ -0,0 +1,611 @@
/*
* PROGRAM: JRD Backup and Restore Program
* MODULE: BurpTasks.h
* DESCRIPTION:
*
* The contents of this file are subject to the Interbase Public
* License Version 1.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy
* of the License at http://www.Inprise.com/IPL.html
*
* Software distributed under the License is distributed on an
* "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either express
* or implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code was created by Khorsun Vladyslav
* for the Firebird Open Source RDBMS project.
*
* Copyright (c) 2019 Khorsun Vladyslav <hvlad@users.sourceforge.net>
* and all contributors signed below.
*
* All Rights Reserved.
* Contributor(s): ______________________________________.
*
*/
#ifndef BURP_TASKS_H
#define BURP_TASKS_H
#include <stdio.h>
#include "../common/common.h"
#include "../burp/burp.h"
#include "../common/ThreadData.h"
#include "../common/Task.h"
#include "../common/UtilSvc.h"
#include "../common/classes/array.h"
#include "../common/classes/condition.h"
#include "../common/classes/fb_atomic.h"
namespace Firebird {
// Per-relation metadata for the parallel backup reader: holds the BLR of the
// request used to fetch a relation's records and the layout (lengths/offsets)
// of its output message. Built once by setRelation() and shared read-only by
// the ReadRelationReq instances of all worker items.
class ReadRelationMeta
{
public:
	ReadRelationMeta() :
		m_blr(*getDefaultMemoryPool())
	{
		clear();
	}

	// Builds the request BLR for the given relation; when 'partition' is true
	// the request takes PP-range input parameters so several workers can read
	// disjoint slices of the table (see BurpTasks.cpp for the definition).
	void setRelation(const burp_rel* relation, bool partition);
	void clear();

	// True when the request takes an input message (i.e. the partitioned
	// form was built and in/out message numbers differ).
	bool haveInputs() const
	{
		return m_inMgsNum != m_outMgsNum;
	}

//private:
	const burp_rel* m_relation;					// relation this metadata describes
	SSHORT m_fldCount;							// number of message fields, incl. eof
	SSHORT m_inMgsNum;							// input (parameter) message number
	SSHORT m_outMgsNum;							// output (record) message number
	Firebird::HalfStaticArray<UCHAR, 256> m_blr;	// compiled request BLR text
	ULONG m_outMsgLen;							// total output message length, bytes
	ULONG m_outRecLen;							// record data length within message
	ULONG m_outEofOffset;						// offset of the eof flag in message
};
// One compiled instance of the read-relation request, owned by a single
// backup reader item. Wraps the IRequest life cycle (compile/start/receive/
// release) and exposes the received record buffer and eof flag.
class ReadRelationReq
{
public:
	ReadRelationReq() :
		m_outMsg(*getDefaultMemoryPool())
	{
		m_relation = NULL;
		m_meta = NULL;
		memset(&m_inMgs, 0, sizeof(m_inMgs));
		m_eof = NULL;
		m_request = 0;
	}

	~ReadRelationReq()
	{
		clear();
	}

	// Binds this request to shared metadata built by ReadRelationMeta.
	void reset(const ReadRelationMeta* meta);
	void clear();
	// Compiles the BLR from the metadata against the given attachment.
	void compile(Firebird::CheckStatusWrapper* status, IAttachment* db);
	// Sets the [loPP, hiPP) pointer-page range for a partitioned read.
	void setParams(ULONG loPP, ULONG hiPP);
	void start(Firebird::CheckStatusWrapper* status, ITransaction* tran);
	// Receives the next record into the output message buffer.
	void receive(Firebird::CheckStatusWrapper* status);
	void release(Firebird::CheckStatusWrapper* status);

	const ReadRelationMeta* getMeta() const
	{
		return m_meta;
	}

	// Raw output message of the last receive(); field offsets come from the
	// metadata.
	const UCHAR* getData() const
	{
		return m_outMsg.begin();
	}

	// Value of the eof field in the last received message. Valid only after
	// receive(); m_eof points into m_outMsg.
	bool eof() const
	{
		return *m_eof;
	}

private:
	// Input message layout: the pointer-page range to scan.
	struct INMSG
	{
		ULONG loPP;
		ULONG hiPP;
	};

	const burp_rel* m_relation;
	const ReadRelationMeta* m_meta;		// shared, not owned
	INMSG m_inMgs;
	Firebird::Array<UCHAR> m_outMsg;	// output message buffer
	SSHORT* m_eof;						// points at eof flag inside m_outMsg
	IRequest* m_request;				// compiled request, owned
};
// Per-relation metadata for the parallel restore writer. Prepares either a
// batch insert (SQL statement + IBatch) or a classic BLR request, and is
// shared by all writer items restoring the same relation.
class WriteRelationMeta
{
public:
	WriteRelationMeta() :
		m_blr(*getDefaultMemoryPool())
	{
		clear();
	}

	void setRelation(BurpGlobals* tdgbl, const burp_rel* relation);
	void clear();
	// Creates a new IBatch for the given attachment using the prepared
	// statement metadata.
	IBatch* createBatch(BurpGlobals* tdgbl, IAttachment* att);

//private:
	// Returns true when batch mode could be prepared; falls back to the
	// request form otherwise (see m_batchMode/m_batchOk).
	bool prepareBatch(BurpGlobals* tdgbl);
	void prepareRequest(BurpGlobals* tdgbl);

	const burp_rel* m_relation;
	Firebird::Mutex m_mutex;			// guards lazy preparation — NOTE(review): confirm against BurpTasks.cpp
	bool m_batchMode;					// batch insert mode selected
	bool m_batchOk;						// batch preparation succeeded
	ULONG m_inMsgLen;					// input message length, bytes

	// batch mode
	Firebird::string m_sqlStatement;	// generated INSERT statement
	Firebird::RefPtr<Firebird::IMessageMetadata> m_batchMeta;
	unsigned m_batchStep;
	unsigned m_batchInlineBlobLimit;	// max blob size sent inline in the batch

	// request mode
	SSHORT m_inMgsNum;					// input message number of the BLR request
	Firebird::HalfStaticArray<UCHAR, 256> m_blr;
};
// One writer-side instance of the insert request for a relation, owned by a
// single restore item. Depending on the shared WriteRelationMeta it operates
// through an IBatch or a compiled IRequest.
class WriteRelationReq
{
public:
	WriteRelationReq() :
		m_inMsg(*getDefaultMemoryPool()),
		m_batchMsg(*getDefaultMemoryPool())
	{
		m_relation = nullptr;
		m_meta = nullptr;
		m_batch = nullptr;
		m_request = nullptr;
		m_recs = 0;
		m_resync = true;
	}

	~WriteRelationReq()
	{
		clear();
	}

	// Binds this request to the shared per-relation metadata.
	void reset(WriteRelationMeta* meta);
	void clear();
	void compile(BurpGlobals* tdgbl, IAttachment* att);
	// Sends the accumulated message; 'lastRec' flushes the final record of
	// the relation.
	void send(BurpGlobals* tdgbl, ITransaction* tran, bool lastRec);
	void release();

	ULONG getDataLength() const
	{
		return m_inMsg.getCount();
	}

	// Input message buffer the caller fills with record data.
	UCHAR* getData()
	{
		return m_inMsg.begin();
	}

	IBatch* getBatch() const
	{
		return m_batch;
	}

	ULONG getBatchMsgLength() const
	{
		return m_batchMsg.getCount();
	}

	UCHAR* getBatchMsgData()
	{
		return m_batchMsg.begin();
	}

	// Blobs up to this size are sent inline within the batch message.
	unsigned getBatchInlineBlobLimit() const
	{
		return m_meta->m_batchInlineBlobLimit;
	}

private:
	const burp_rel* m_relation;
	WriteRelationMeta* m_meta;			// shared, not owned
	Firebird::Array<UCHAR> m_inMsg;		// request-mode input message
	Firebird::Array<UCHAR> m_batchMsg;	// batch-mode message buffer
	IBatch* m_batch;
	IRequest* m_request;
	int m_recs;							// records accumulated since last send
	bool m_resync;
};
// forward declaration
class IOBuffer;
// Parallel backup of a single relation. Work items are either table readers
// (each with its own attachment, reading a slice of pointer pages) or the
// single file writer that serializes filled buffers to the backup file.
// Buffers circulate between a clean list (reader side) and a dirty list
// (writer side). Handler/GetWorkItem/GetResult/GetMaxWorkers implement the
// Jrd::Task interface.
class BackupRelationTask : public Jrd::Task
{
public:
	BackupRelationTask(BurpGlobals* tdgbl);
	~BackupRelationTask();

	// Sets the relation to back up next; the task object is reused for every
	// relation in turn.
	void SetRelation(burp_rel* relation);

	bool Handler(WorkItem& _item);
	bool GetWorkItem(WorkItem** pItem);
	bool GetResult(IStatus* status);
	int GetMaxWorkers();

	// Per-worker state: either the file writer or one table reader.
	class Item : public Task::WorkItem
	{
	public:
		Item(BackupRelationTask* task, bool writer) : WorkItem(task),
			m_inuse(false),
			m_writer(writer),
			m_ownAttach(!writer),		// readers attach on their own; the writer reuses the main attachment
			m_gbl(NULL),
			m_att(0),
			m_tra(0),
			m_relation(NULL),
			m_ppSequence(0),
			m_cleanBuffers(*getDefaultMemoryPool()),
			m_buffer(NULL)
		{}

		BackupRelationTask* getBackupTask() const
		{
			return reinterpret_cast<BackupRelationTask*> (m_task);
		}

		// RAII guard: unlocks the item's current buffer on scope exit.
		class EnsureUnlockBuffer
		{
		public:
			EnsureUnlockBuffer(Item* item) : m_item(item) {}
			~EnsureUnlockBuffer();

		private:
			Item* m_item;
		};

		bool m_inuse;
		bool m_writer;			// file writer or table reader
		bool m_ownAttach;
		BurpGlobals* m_gbl;
		IAttachment* m_att;
		ITransaction* m_tra;
		burp_rel* m_relation;
		ReadRelationReq m_request;
		ULONG m_ppSequence;		// PP to read
		Mutex m_mutex;
		HalfStaticArray<IOBuffer*, 2> m_cleanBuffers;	// per-item stash of empty buffers
		IOBuffer* m_buffer;		// buffer currently being filled
		Semaphore m_cleanSem;	// signaled when a clean buffer becomes available
	};

	static BackupRelationTask* getBackupTask(BurpGlobals* tdgbl);

	BurpGlobals* getMasterGbl() const
	{
		return m_masterGbl;
	}

	static void recordAdded(BurpGlobals* tdgbl);		// reader
	static IOBuffer* renewBuffer(BurpGlobals* tdgbl);	// reader

	bool isStopped() const
	{
		return m_stop;
	}

	// Serializes output of backup messages from concurrent workers.
	Mutex burpOutMutex;

private:
	void initItem(BurpGlobals* tdgbl, Item& item);
	void freeItem(Item& item);
	bool fileWriter(Item& item);
	bool tableReader(Item& item);
	void releaseBuffer(Item& item);			// reader

	IOBuffer* getCleanBuffer(Item& item);	// reader
	void putDirtyBuffer(IOBuffer* buf);		// reader

	IOBuffer* getDirtyBuffer();				// writer
	void putCleanBuffer(IOBuffer* buf);		// writer

	BurpGlobals* m_masterGbl;	// tdgbl of the main (master) thread
	burp_rel* m_relation;		// relation currently being backed up
	ReadRelationMeta m_metadata;	// shared read-request metadata for m_relation
	int m_readers;				// number of active readers, could be less than items allocated
	bool m_readDone;			// true when all readers are done
	ULONG m_nextPP;				// next pointer page to hand to a reader
	Mutex m_mutex;
	HalfStaticArray<Item*, 8> m_items;
	ISC_STATUS_ARRAY m_status;	// first error captured from a worker
	volatile bool m_stop;
	bool m_error;
	HalfStaticArray<IOBuffer*, 16> m_buffers;		// all buffers, owned
	HalfStaticArray<IOBuffer*, 8> m_dirtyBuffers;	// filled, waiting for the writer
	Semaphore m_dirtySem;		// signaled when a dirty buffer is queued
};
// Parallel restore of a single relation: one file reader item parses records
// from the backup file into buffers, several table writer items (each with
// its own attachment) insert them. Mirrors BackupRelationTask's buffer
// circulation with clean/dirty lists.
class RestoreRelationTask : public Jrd::Task
{
public:
	RestoreRelationTask(BurpGlobals* tdgbl);
	~RestoreRelationTask();

	// Sets the relation to restore next; the task object is reused.
	void SetRelation(BurpGlobals* tdgbl, burp_rel* relation);

	bool Handler(WorkItem& _item);
	bool GetWorkItem(WorkItem** pItem);
	bool GetResult(IStatus* status);
	int GetMaxWorkers();

	// Per-worker state: either the file reader or one table writer.
	class Item : public Task::WorkItem
	{
	public:
		Item(RestoreRelationTask* task, bool reader) : WorkItem(task),
			m_inuse(false),
			m_reader(reader),
			m_ownAttach(!reader),	// writers attach on their own; the reader reuses the main attachment
			m_gbl(NULL),
			m_att(0),
			m_tra(0),
			m_relation(NULL),
			m_buffer(NULL)
		{}

		RestoreRelationTask* getRestoreTask() const
		{
			return reinterpret_cast<RestoreRelationTask*> (m_task);
		}

		// RAII guard: unlocks the item's current buffer on scope exit.
		class EnsureUnlockBuffer
		{
		public:
			EnsureUnlockBuffer(Item* item) : m_item(item) {}
			~EnsureUnlockBuffer();

		private:
			Item* m_item;
		};

		bool m_inuse;
		bool m_reader;			// file reader or table writer
		bool m_ownAttach;
		BurpGlobals* m_gbl;
		IAttachment* m_att;
		ITransaction* m_tra;
		burp_rel* m_relation;
		WriteRelationReq m_request;
		Mutex m_mutex;
		IOBuffer* m_buffer;		// buffer currently being consumed
	};

	// Internal exception used to signal "no more records" to the writers.
	class ExcReadDone : public Exception
	{
	public:
		ExcReadDone() throw() : Exception() { }
		virtual void stuffByException(StaticStatusVector& status_vector) const throw();
		virtual const char* what() const throw();
		static void raise();
	};

	static RestoreRelationTask* getRestoreTask(BurpGlobals* tdgbl);

	BurpGlobals* getMasterGbl() const
	{
		return m_masterGbl;
	}

	static IOBuffer* renewBuffer(BurpGlobals* tdgbl);	// writer

	bool isStopped() const
	{
		return m_stop;
	}

	rec_type getLastRecord() const
	{
		return m_lastRecord;
	}

	// Verbose reporting of restored record counts (periodic and final).
	void verbRecs(FB_UINT64& records, bool total);
	void verbRecsFinal();

	// commit and detach all worker connections
	bool finish();

	// Serializes output of restore messages from concurrent workers.
	Mutex burpOutMutex;

private:
	void initItem(BurpGlobals* tdgbl, Item& item);
	bool freeItem(Item& item, bool commit);
	bool fileReader(Item& item);
	bool tableWriter(BurpGlobals* tdgbl, Item& item);
	void releaseBuffer(Item& item);			// writer

	// reader needs clean buffer to read backup file into
	IOBuffer* getCleanBuffer();				// reader
	// put buffer full of records to be handled by writer
	void putDirtyBuffer(IOBuffer* buf);		// reader

	IOBuffer* getDirtyBuffer();				// writer
	void putCleanBuffer(IOBuffer* buf);		// writer

	// Ensures 'length' bytes fit into the current buffer, chaining to the
	// next one when necessary.
	void checkSpace(IOBuffer** pBuf, const FB_SIZE_T length, UCHAR** pData, FB_SIZE_T* pSpace);
	IOBuffer* read_blob(BurpGlobals* tdgbl, IOBuffer* ioBuf);
	IOBuffer* read_array(BurpGlobals* tdgbl, IOBuffer* ioBuf);

	BurpGlobals* m_masterGbl;	// tdgbl of the main (master) thread
	burp_rel* m_relation;		// relation currently being restored
	rec_type m_lastRecord;		// last backup record read for relation, usually rec_relation_end
	WriteRelationMeta m_metadata;	// shared write-request metadata for m_relation
	int m_writers;				// number of active writers, could be less than items allocated
	bool m_readDone;			// all records was read
	Mutex m_mutex;
	HalfStaticArray<Item*, 8> m_items;
	ISC_STATUS_ARRAY m_status;	// first error captured from a worker
	volatile bool m_stop;
	bool m_error;
	Firebird::AtomicCounter m_records;		// records restored for the current relation
	FB_UINT64 m_verbRecs;		// last records count reported
	HalfStaticArray<IOBuffer*, 16> m_buffers;		// all buffers, owned
	HalfStaticArray<IOBuffer*, 16> m_cleanBuffers;	// empty, waiting for the reader
	HalfStaticArray<IOBuffer*, 16> m_dirtyBuffers;	// filled, waiting for writers
	Semaphore m_cleanSem;		// signaled when a clean buffer becomes available
	Condition m_dirtyCond;		// signaled when a dirty buffer is queued
};
// Aligned memory buffer circulated between reader and writer items of the
// backup/restore tasks. Carries a record count, a used-bytes watermark, an
// optional link to the next buffer in a chain, and a recursive-style lock
// counter protected by its own mutex.
class IOBuffer
{
public:
	IOBuffer(void*, FB_SIZE_T size);
	~IOBuffer();

	UCHAR* getBuffer() const
	{
		return m_aligned;
	}

	FB_SIZE_T getSize() const
	{
		return m_size;
	}

	FB_SIZE_T getRecs() const
	{
		return m_recs;
	}

	FB_SIZE_T getUsed() const
	{
		return m_used;
	}

	void setUsed(FB_SIZE_T used)
	{
		fb_assert(used <= m_size);
		m_used = used;
	}

	// Resets contents bookkeeping; the memory itself is reused.
	void clear()
	{
		m_used = 0;
		m_recs = 0;
		m_next = NULL;
		m_linked = false;
	}

	void recordAdded()
	{
		m_recs++;
	}

	// Chains 'buf' after this buffer and marks it as linked.
	void linkNext(IOBuffer* buf)
	{
		m_next = buf;
		m_next->m_linked = true;
	}

	bool isLinked() const
	{
		return m_linked;
	}

	void lock()
	{
		m_mutex.enter(FB_FUNCTION);
		fb_assert(m_locked >= 0);
		m_locked++;
	}

	// With opt == true, unlocks only when the current thread holds the lock:
	// a failed tryEnter() means another thread owns the mutex, so we leave it
	// alone; a zero counter means it is not locked at all.
	void unlock(bool opt = false)
	{
		if (opt)	// unlock only if locked by me
		{
			if (m_locked == 0)
				return;

			if (!m_mutex.tryEnter(FB_FUNCTION))
				return;

			m_mutex.leave();
		}

		fb_assert(m_locked > 0);
		m_locked--;
		m_mutex.leave();
	}

	IOBuffer* getNext()
	{
		return m_next;
	}

	// Opaque owner cookie set at construction (the Item this buffer belongs
	// to) — NOTE(review): confirm against BurpTasks.cpp.
	void* getItem() const
	{
		return m_item;
	}

private:
	void* const m_item;
	UCHAR* m_memory;		// raw allocation
	UCHAR* m_aligned;		// aligned pointer into m_memory
	const FB_SIZE_T m_size;
	FB_SIZE_T m_used;		// bytes filled so far
	FB_SIZE_T m_recs;		// records stored so far
	IOBuffer* m_next;		// next buffer in chain, if linked
	bool m_linked;
	int m_locked;			// lock depth, guarded by m_mutex
	Mutex m_mutex;
};
class BurpMaster
{
public:
BurpMaster()
{
m_tdgbl = BurpGlobals::getSpecific();
m_task = BackupRelationTask::getBackupTask(m_tdgbl);
if (!m_tdgbl->master)
m_tdgbl = m_task->getMasterGbl();
if (m_task)
m_task->burpOutMutex.enter(FB_FUNCTION);
}
~BurpMaster()
{
if (m_task)
m_task->burpOutMutex.leave();
}
BurpGlobals* get() const
{
return m_tdgbl;
}
private:
BackupRelationTask* m_task;
BurpGlobals* m_tdgbl;
};
} // namespace Firebird
#endif // BURP_TASKS_H

View File

@ -60,9 +60,10 @@
#include "../common/classes/BlobWrapper.h"
#include "../common/classes/MsgPrint.h"
#include "../burp/OdsDetection.h"
#include "../burp/BurpTasks.h"
using MsgFormat::SafeArg;
using Firebird::FbLocalStatus;
using namespace Firebird;
// For service APIs the follow DB handle is a value stored
@ -111,7 +112,7 @@ void put_array(burp_fld*, burp_rel*, ISC_QUAD*);
void put_asciz(const att_type, const TEXT*);
void put_blob(burp_fld*, ISC_QUAD&);
bool put_blr_blob(att_type, ISC_QUAD&);
void put_data(burp_rel*);
void put_data(burp_rel*, ReadRelationReq*);
void put_index(burp_rel*);
int put_message(att_type, att_type, const TEXT*, const ULONG);
void put_int32(att_type, SLONG);
@ -246,6 +247,53 @@ int BACKUP_backup(const TEXT* dbb_file, const TEXT* file_name)
EXEC SQL SET TRANSACTION NAME gds_trans;
}
// get shared snapshot number if asked for parallel backup
tdgbl->tr_snapshot = 0;
if (gds_trans && tdgbl->gbl_sw_par_workers > 1)
{
UCHAR in_buf[] = {fb_info_tra_snapshot_number, isc_info_end};
UCHAR out_buf[16] = {0};
gds_trans->getInfo(fbStatus, sizeof(in_buf), in_buf, sizeof(out_buf), out_buf);
if (fbStatus->isEmpty())
{
UCHAR* p = out_buf, *e = out_buf + sizeof(out_buf);
while (p < e)
{
SSHORT len;
switch (*p++)
{
case isc_info_error:
case isc_info_end:
p = e;
break;
case fb_info_tra_snapshot_number:
len = isc_portable_integer(p, 2);
p += 2;
tdgbl->tr_snapshot = isc_portable_integer(p, len);
p += len;
break;
}
}
}
if (tdgbl->tr_snapshot == 0)
tdgbl->gbl_sw_par_workers = 1;
}
// detect if MAKE_DBKEY is supported and decide kind of read relation query
if (tdgbl->gbl_sw_par_workers > 1)
{
const char* sql = "SELECT MAKE_DBKEY(0, 0) FROM RDB$DATABASE";
IStatement* stmt = DB->prepare(fbStatus, gds_trans, 0, sql, 3, 0);
if (fbStatus->getState() & IStatus::RESULT_ERROR)
{
// BURP_print_status(false, isc_status);
tdgbl->gbl_sw_par_workers = 1;
}
if (stmt)
stmt->free(fbStatus);
}
// decide what type of database we've got
@ -345,6 +393,10 @@ int BACKUP_backup(const TEXT* dbb_file, const TEXT* file_name)
// Now go back and write all data
{
Jrd::Coordinator coord(getDefaultMemoryPool());
Firebird::BackupRelationTask task(tdgbl);
for (burp_rel* relation = tdgbl->relations; relation; relation = relation->rel_next)
{
put(tdgbl, (UCHAR) rec_relation_data);
@ -355,11 +407,18 @@ int BACKUP_backup(const TEXT* dbb_file, const TEXT* file_name)
{
put_index(relation);
if (!(tdgbl->gbl_sw_meta || tdgbl->skipRelation(relation->rel_name)))
put_data(relation);
{
task.SetRelation(relation);
coord.RunSync(&task);
if (!task.GetResult(NULL))
BURP_exit_local(FINI_ERROR, tdgbl);
}
}
put(tdgbl, (UCHAR) rec_relation_end);
}
}
// now for the new triggers in rdb$triggers
BURP_verbose(159);
@ -1035,6 +1094,7 @@ void put_array( burp_fld* field, burp_rel* relation, ISC_QUAD* blob_id)
if (!status_vector.isSuccess())
{
BurpMaster master;
BURP_print(false, 81, field->fld_name);
// msg 81 error accessing blob field %s -- continuing
BURP_print_status(false, &status_vector);
@ -1180,6 +1240,7 @@ void put_blob( burp_fld* field, ISC_QUAD& blob_id)
if (!blob.open(DB, gds_trans, blob_id))
{
BurpMaster master;
BURP_print(false, 81, field->fld_name);
// msg 81 error accessing blob field %s -- continuing
BURP_print_status(false, &status_vector);
@ -1403,7 +1464,7 @@ bool put_blr_blob( att_type attribute, ISC_QUAD& blob_id)
}
void put_data(burp_rel* relation)
void put_data(burp_rel* relation, ReadRelationReq* request)
{
/**************************************
*
@ -1416,296 +1477,81 @@ void put_data(burp_rel* relation)
*
**************************************/
BurpGlobals* tdgbl = BurpGlobals::getSpecific();
USHORT field_count = 1; // eof field
BackupRelationTask* task = BackupRelationTask::getBackupTask(tdgbl);
const ReadRelationMeta* reqMeta = request->getMeta();
HalfStaticArray<burp_fld*, 4> blobFlds;
HalfStaticArray<burp_fld*, 4> arrayFlds;
SSHORT count = 0;
burp_fld* field;
for (field = relation->rel_fields; field; field = field->fld_next)
{
if (!(field->fld_flags & FLD_computed))
{
field_count += 2;
}
}
fb_assert(field_count > 0 && field_count * 9 > 0 && field_count * 9 + 200 > 0);
// Time to generate blr to fetch data. Make sure we allocate a BLR buffer
// large enough to handle the per field overhead
UCHAR* const blr_buffer = BURP_alloc(200 + field_count * 9);
UCHAR* blr = blr_buffer;
add_byte(blr, blr_version4);
add_byte(blr, blr_begin);
add_byte(blr, blr_message);
add_byte(blr, 0); // Message number
add_word(blr, field_count); // Number of fields, counting eof
RCRD_OFFSET offset = 0;
USHORT count = 0; // This is param count.
for (field = relation->rel_fields; field; field = field->fld_next)
{
if (field->fld_flags & FLD_computed)
continue;
SSHORT alignment = 4;
FLD_LENGTH length = field->fld_length;
SSHORT dtype = field->fld_type;
count += 2;
if (field->fld_flags & FLD_array)
{
dtype = blr_blob;
length = 8;
}
switch (dtype)
{
case blr_text:
alignment = type_alignments[dtype_text];
add_byte(blr, field->fld_type);
add_word(blr, field->fld_length);
break;
case blr_varying:
alignment = type_alignments[dtype_varying];
add_byte(blr, field->fld_type);
add_word(blr, field->fld_length);
length += sizeof(USHORT);
break;
case blr_short:
alignment = type_alignments[dtype_short];
add_byte(blr, field->fld_type);
add_byte(blr, field->fld_scale);
break;
case blr_long:
alignment = type_alignments[dtype_long];
add_byte(blr, field->fld_type);
add_byte(blr, field->fld_scale);
break;
case blr_quad:
alignment = type_alignments[dtype_quad];
add_byte(blr, field->fld_type);
add_byte(blr, field->fld_scale);
break;
case blr_int64:
alignment = type_alignments[dtype_int64];
add_byte(blr, field->fld_type);
add_byte(blr, field->fld_scale);
break;
case blr_int128:
alignment = type_alignments[dtype_int128];
add_byte(blr, field->fld_type);
add_byte(blr, field->fld_scale);
break;
case blr_double:
alignment = type_alignments[dtype_double];
add_byte(blr, field->fld_type);
break;
case blr_timestamp:
alignment = type_alignments[dtype_timestamp];
add_byte(blr, field->fld_type);
break;
case blr_timestamp_tz:
alignment = type_alignments[dtype_timestamp_tz];
add_byte(blr, field->fld_type);
break;
case blr_sql_time:
alignment = type_alignments[dtype_sql_time];
add_byte(blr, field->fld_type);
break;
case blr_sql_time_tz:
alignment = type_alignments[dtype_sql_time_tz];
add_byte(blr, field->fld_type);
break;
case blr_sql_date:
alignment = type_alignments[dtype_sql_date];
add_byte(blr, field->fld_type);
break;
case blr_float:
alignment = type_alignments[dtype_real];
add_byte(blr, field->fld_type);
break;
case blr_blob:
alignment = type_alignments[dtype_blob];
add_byte(blr, blr_quad);
add_byte(blr, 0);
break;
case blr_bool:
alignment = type_alignments[dtype_boolean];
add_byte(blr, field->fld_type);
break;
case blr_dec64:
case blr_dec128:
alignment = type_alignments[dtype];
add_byte(blr, field->fld_type);
break;
default:
BURP_error_redirect(NULL, 26, SafeArg() << field->fld_type);
// msg 26 datatype %ld not understood
break;
}
if (alignment)
offset = FB_ALIGN(offset, alignment);
field->fld_offset = offset;
field->fld_parameter = count++;
offset += length;
arrayFlds.add(field);
else if (field->fld_type == blr_blob)
blobFlds.add(field);
}
count++; // eof
// Next, build fields for null flags
//BURP_verbose(142, relation->rel_name);
//// msg 142 writing data for relation %s
for (field = relation->rel_fields; field; field = field->fld_next)
FbLocalStatus status;
request->start(&status, gds_trans);
if (status->getState() & IStatus::STATE_ERRORS)
{
if (field->fld_flags & FLD_computed)
continue;
add_byte(blr, blr_short);
add_byte(blr, 0);
offset = FB_ALIGN(offset, sizeof(SSHORT));
field->fld_missing_parameter = count++;
offset += sizeof(SSHORT);
}
// Finally, make up an EOF field
add_byte(blr, blr_short); // eof field
add_byte(blr, 0); // scale for eof field
SSHORT eof_parameter = count++;
RCRD_OFFSET record_length = offset;
RCRD_OFFSET eof_offset = FB_ALIGN(offset, sizeof(SSHORT));
// To be used later for the buffer size to receive data
const RCRD_LENGTH length = (RCRD_LENGTH) (eof_offset + sizeof(SSHORT));
// Build FOR loop, body, and eof handler
add_byte(blr, blr_for);
add_byte(blr, blr_rse);
add_byte(blr, 1); // count of relations
add_byte(blr, blr_rid);
add_word(blr, relation->rel_id);
add_byte(blr, 0); // context variable
add_byte(blr, blr_end);
add_byte(blr, blr_send);
add_byte(blr, 0);
add_byte(blr, blr_begin);
add_byte(blr, blr_assignment);
add_byte(blr, blr_literal);
add_byte(blr, blr_short);
add_byte(blr, 0);
add_word(blr, 1);
add_byte(blr, blr_parameter);
add_byte(blr, 0);
add_word(blr, eof_parameter);
for (field = relation->rel_fields; field; field = field->fld_next)
{
if (field->fld_flags & FLD_computed)
continue;
add_byte(blr, blr_assignment);
add_byte(blr, blr_fid);
add_byte(blr, 0);
add_word(blr, field->fld_id);
add_byte(blr, blr_parameter2);
add_byte(blr, 0);
add_word(blr, field->fld_parameter);
add_word(blr, field->fld_missing_parameter);
}
add_byte(blr, blr_end);
add_byte(blr, blr_send);
add_byte(blr, 0);
add_byte(blr, blr_assignment);
add_byte(blr, blr_literal);
add_byte(blr, blr_short);
add_byte(blr, 0);
add_word(blr, 0);
add_byte(blr, blr_parameter);
add_byte(blr, 0);
add_word(blr, eof_parameter);
add_byte(blr, blr_end);
add_byte(blr, blr_eoc);
unsigned blr_length = blr - blr_buffer;
#ifdef DEBUG
if (debug_on)
fb_print_blr(blr_buffer, blr_length, NULL, NULL, 0);
#endif
// Compile request
FbLocalStatus status_vector;
Firebird::IRequest* request = DB->compileRequest(&status_vector, blr_length, blr_buffer);
if (!status_vector.isSuccess())
{
BURP_error_redirect(&status_vector, 27);
// msg 27 isc_compile_request failed
fb_print_blr(blr_buffer, blr_length, NULL, NULL, 0);
}
BURP_free(blr_buffer);
BURP_verbose(142, relation->rel_name);
// msg 142 writing data for relation %s
request->start(&status_vector, gds_trans, 0);
if (!status_vector.isSuccess())
{
BURP_error_redirect(&status_vector, 28);
BURP_error_redirect(&status, 28);
// msg 28 isc_start_request failed
}
// Here is the crux of the problem -- writing data. All this work
// for the following small loop.
UCHAR* buffer = BURP_alloc(length);
SSHORT* eof = (SSHORT *) (buffer + eof_offset);
const UCHAR* buffer = request->getData();
// the XDR representation may be even fluffier
lstring xdr_buffer;
if (tdgbl->gbl_sw_transportable)
{
xdr_buffer.lstr_length = xdr_buffer.lstr_allocated = length + count * 3;
xdr_buffer.lstr_length = xdr_buffer.lstr_allocated = reqMeta->m_outMsgLen + count * 3;
xdr_buffer.lstr_address = BURP_alloc(xdr_buffer.lstr_length);
}
else
xdr_buffer.lstr_address = NULL;
RCRD_OFFSET record_length = reqMeta->m_outRecLen;
FB_UINT64 records = 0;
while (true)
{
request->receive(&status_vector, 0, 0, length, buffer);
if (!status_vector.isSuccess())
if (task->isStopped())
break;
request->receive(&status);
if (status->getState() & IStatus::STATE_ERRORS)
{
BURP_error_redirect(&status_vector, 29);
BURP_error_redirect(&status, 29);
// msg 29 isc_receive failed
}
if (!*eof)
if (!request->eof())
break;
records++;
// Verbose records
if ((records % tdgbl->verboseInterval) == 0)
BURP_verbose(108, SafeArg() << records);
//if ((records % tdgbl->verboseInterval) == 0)
// BURP_verbose(108, SafeArg() << records);
put(tdgbl, (UCHAR) rec_data);
put_int32(att_data_length, record_length);
const UCHAR* p;
if (tdgbl->gbl_sw_transportable)
{
record_length = CAN_encode_decode(relation, &xdr_buffer, buffer, true);
record_length = CAN_encode_decode(relation, &xdr_buffer, const_cast<UCHAR*>(buffer), TRUE);
put_int32(att_xdr_length, record_length);
p = xdr_buffer.lstr_address;
}
@ -1719,38 +1565,38 @@ void put_data(burp_rel* relation)
// Look for any blobs to write
for (field = relation->rel_fields; field; field = field->fld_next)
for (burp_fld** pField = blobFlds.begin(); pField < blobFlds.end(); pField++)
{
if (field->fld_type == blr_blob &&
!(field->fld_flags & FLD_computed) && !(field->fld_flags & FLD_array))
{
put_blob(field, *(ISC_QUAD*) (buffer + field->fld_offset));
}
if (task->isStopped())
break;
field = *pField;
put_blob(field, *(ISC_QUAD*) (buffer + field->fld_offset));
}
// Look for any array to write
// we got back the blob_id for the array from isc_receive in the second param.
for (field = relation->rel_fields; field; field = field->fld_next)
for (burp_fld** pField = arrayFlds.begin(); pField < arrayFlds.end(); pField++)
{
if (field->fld_flags & FLD_array)
{
put_array(field, relation, (ISC_QUAD*) (buffer + field->fld_offset));
}
}
}
if (task->isStopped())
break;
BURP_free(buffer);
field = *pField;
put_array(field, relation, (ISC_QUAD*)(buffer + field->fld_offset));
}
BackupRelationTask::recordAdded(tdgbl);
}
if (xdr_buffer.lstr_address)
BURP_free(xdr_buffer.lstr_address);
BURP_verbose(108, SafeArg() << records);
// msg 108 %ld records written
//BURP_verbose(108, SafeArg() << records);
//// msg 108 %ld records written
request->free(&status_vector);
if (!status_vector.isSuccess())
BURP_error_redirect(&status_vector, 30);
// msg 30 isc_release_request failed
//if (request->release(status_vector))
// BURP_error_redirect(status_vector, 30);
//// msg 30 isc_release_request failed
}
@ -2161,7 +2007,19 @@ void put_relation( burp_rel* relation)
END_ERROR;
}
}
put(tdgbl, (UCHAR) rec_relation_end);
else if (!tdgbl->gbl_sw_meta)
{
FOR(REQUEST_HANDLE tdgbl->handles_put_relation_req_handle3)
FIRST 1 P IN RDB$PAGES WITH P.RDB$RELATION_ID EQ relation->rel_id
AND P.RDB$PAGE_TYPE = 4
SORTED BY DESCENDING P.RDB$PAGE_SEQUENCE
relation->rel_max_pp = P.RDB$PAGE_SEQUENCE;
END_FOR;
ON_ERROR
general_on_error();
END_ERROR;
}
put(tdgbl, (UCHAR)rec_relation_end);
}
@ -4530,3 +4388,443 @@ void write_user_privileges()
} // namespace
namespace Firebird {
/// class ReadRelationMeta
// Bind this metadata object to a relation and generate the BLR of the
// request used to read its records during parallel backup.
void ReadRelationMeta::setRelation(const burp_rel* relation, bool partition)
{
	m_relation = relation;

	// Build request BLR. There could be two kinds of requests:
	// a) partition == true
	//      SELECT * FROM relation
	//      WHERE dbkey >= MAKE_DBKEY(rel_id, 0, 0, :loPP)
	//        AND dbkey <  MAKE_DBKEY(rel_id, 0, 0, :hiPP)
	// b) partition == false
	//      SELECT * FROM relation
	// Note, computed fields are not included into results

	// CVC: A signed short isn't enough if the engine allows near 32K fields,
	// each being char(1) ASCII in the worst case. Looking at BLR generation
	// below, it's clear an extreme case won't compile => blr_length >= 32K.
	// However, SSHORT is the limit for request_length in isc_compile_request.

	// Count the output message fields: two per non-computed field
	// (data value + null flag) plus one for the EOF marker.
	m_fldCount = 1;
	burp_fld* field;
	for (field = relation->rel_fields; field; field = field->fld_next)
	{
		if (!(field->fld_flags & FLD_computed))
		{
			m_fldCount += 2;
		}
	}

	// guard against arithmetic overflow in the buffer size computed below
	fb_assert(m_fldCount > 0 && m_fldCount * 9 > 0 && m_fldCount * 9 + 200 > 0);

	// Time to generate blr to fetch data. Make sure we allocate a BLR buffer
	// large enough to handle the per field overhead
	UCHAR* const blr_buffer = m_blr.getBuffer(200 + m_fldCount * 9);
	UCHAR* blr = blr_buffer;

	add_byte(blr, blr_version4);
	add_byte(blr, blr_begin);

	// in message: the [loPP, hiPP) bounds, present only for a partitioned scan
	m_inMgsNum = m_outMgsNum = 0;
	if (partition)
	{
		add_byte(blr, blr_message);
		add_byte(blr, m_inMgsNum);	// Message number
		add_word(blr, 2);			// two fields: loPP and hiPP
		add_byte(blr, blr_long);	// loPP
		add_byte(blr, 0);
		add_byte(blr, blr_long);	// hiPP
		add_byte(blr, 0);
		m_outMgsNum = m_inMgsNum + 1;
	}

	// out message: data + null flag per field, then the EOF marker
	add_byte(blr, blr_message);
	add_byte(blr, m_outMgsNum);	// Message number
	add_word(blr, m_fldCount);	// Number of fields, counting eof

	// First pass: describe each data field and compute its aligned
	// offset inside the output record
	RCRD_OFFSET offset = 0;
	SSHORT count = 0;	// This is param count.
	for (field = relation->rel_fields; field; field = field->fld_next)
	{
		if (field->fld_flags & FLD_computed)
			continue;

		SSHORT alignment = 4;
		FLD_LENGTH length = field->fld_length;
		SSHORT dtype = field->fld_type;

		if (field->fld_flags & FLD_array)
		{
			// arrays are transferred via their blob id
			dtype = blr_blob;
			length = 8;
		}

		switch (dtype)
		{
		case blr_text:
			alignment = type_alignments[dtype_text];
			add_byte(blr, field->fld_type);
			add_word(blr, field->fld_length);
			break;

		case blr_varying:
			alignment = type_alignments[dtype_varying];
			add_byte(blr, field->fld_type);
			add_word(blr, field->fld_length);
			length += sizeof(USHORT);	// room for the leading length word
			break;

		case blr_short:
			alignment = type_alignments[dtype_short];
			add_byte(blr, field->fld_type);
			add_byte(blr, field->fld_scale);
			break;

		case blr_long:
			alignment = type_alignments[dtype_long];
			add_byte(blr, field->fld_type);
			add_byte(blr, field->fld_scale);
			break;

		case blr_quad:
			alignment = type_alignments[dtype_quad];
			add_byte(blr, field->fld_type);
			add_byte(blr, field->fld_scale);
			break;

		case blr_int64:
			alignment = type_alignments[dtype_int64];
			add_byte(blr, field->fld_type);
			add_byte(blr, field->fld_scale);
			break;

		case blr_int128:
			alignment = type_alignments[dtype_int128];
			add_byte(blr, field->fld_type);
			add_byte(blr, field->fld_scale);
			break;

		case blr_double:
			alignment = type_alignments[dtype_double];
			add_byte(blr, field->fld_type);
			break;

		case blr_timestamp:
			alignment = type_alignments[dtype_timestamp];
			add_byte(blr, field->fld_type);
			break;

		case blr_timestamp_tz:
			alignment = type_alignments[dtype_timestamp_tz];
			add_byte(blr, field->fld_type);
			break;

		case blr_sql_time:
			alignment = type_alignments[dtype_sql_time];
			add_byte(blr, field->fld_type);
			break;

		case blr_sql_time_tz:
			alignment = type_alignments[dtype_sql_time_tz];
			add_byte(blr, field->fld_type);
			break;

		case blr_sql_date:
			alignment = type_alignments[dtype_sql_date];
			add_byte(blr, field->fld_type);
			break;

		case blr_float:
			alignment = type_alignments[dtype_real];
			add_byte(blr, field->fld_type);
			break;

		case blr_blob:
			// blob (and array) ids travel as quads
			alignment = type_alignments[dtype_blob];
			add_byte(blr, blr_quad);
			add_byte(blr, 0);
			break;

		case blr_bool:
			alignment = type_alignments[dtype_boolean];
			add_byte(blr, field->fld_type);
			break;

		case blr_dec64:
		case blr_dec128:
			alignment = type_alignments[dtype];
			add_byte(blr, field->fld_type);
			break;

		default:
			BURP_error_redirect(NULL, 26, SafeArg() << field->fld_type);
			// msg 26 datatype %ld not understood
			break;
		}

		if (alignment)
			offset = FB_ALIGN(offset, alignment);

		field->fld_offset = offset;
		field->fld_parameter = count++;
		offset += length;
	}

	// Next, build fields for null flags
	for (field = relation->rel_fields; field; field = field->fld_next)
	{
		if (field->fld_flags & FLD_computed)
			continue;

		add_byte(blr, blr_short);
		add_byte(blr, 0);
		offset = FB_ALIGN(offset, sizeof(SSHORT));
		field->fld_missing_parameter = count++;
		offset += sizeof(SSHORT);
	}

	// Finally, make up an EOF field
	add_byte(blr, blr_short);	// eof field
	add_byte(blr, 0);			// scale for eof field
	const SSHORT eof_parameter = count++;

	m_outRecLen = offset;
	m_outEofOffset = FB_ALIGN(offset, sizeof(SSHORT));

	// To be used later for the buffer size to receive data
	m_outMsgLen = (USHORT) (m_outEofOffset + sizeof(SSHORT));

	// partitioned request first receives the PP bounds from the client
	if (partition)
	{
		add_byte(blr, blr_receive);
		add_byte(blr, m_inMgsNum);
	}

	// Build FOR loop, body, and eof handler
	add_byte(blr, blr_for);
	add_byte(blr, blr_rse);
	add_byte(blr, 1);	// count of relations
	add_byte(blr, blr_rid);
	add_word(blr, relation->rel_id);
	add_byte(blr, 0);	// context variable

	if (partition)
	{
		// add boolean condition:
		// dbkey >= MAKE_DBKEY(rel_id, 0, 0, :loPP) AND
		// dbkey <  MAKE_DBKEY(rel_id, 0, 0, :hiPP)
		add_byte(blr, blr_boolean);
		add_byte(blr, blr_and);
		add_byte(blr, blr_geq);
		add_byte(blr, blr_dbkey);
		add_byte(blr, 0);
		add_byte(blr, blr_sys_function);
		add_string(blr, "MAKE_DBKEY");
		add_byte(blr, 4);	// argument count
		add_byte(blr, blr_literal);		// relID
		add_byte(blr, blr_long);
		add_byte(blr, 0);
		add_long(blr, relation->rel_id);
		add_byte(blr, blr_literal);		// recNo
		add_byte(blr, blr_long);
		add_byte(blr, 0);
		add_long(blr, 0);
		add_byte(blr, blr_literal);		// DP
		add_byte(blr, blr_long);
		add_byte(blr, 0);
		add_long(blr, 0);
		add_byte(blr, blr_parameter);	// PP (loPP, parameter 0 of in message)
		add_byte(blr, m_inMgsNum);
		add_word(blr, 0);

		add_byte(blr, blr_lss);
		add_byte(blr, blr_dbkey);
		add_byte(blr, 0);
		add_byte(blr, blr_sys_function);
		add_string(blr, "MAKE_DBKEY");
		add_byte(blr, 4);	// argument count
		add_byte(blr, blr_literal);		// relID
		add_byte(blr, blr_long);
		add_byte(blr, 0);
		add_long(blr, relation->rel_id);
		add_byte(blr, blr_literal);		// recNo
		add_byte(blr, blr_long);
		add_byte(blr, 0);
		add_long(blr, 0);
		add_byte(blr, blr_literal);		// DP
		add_byte(blr, blr_long);
		add_byte(blr, 0);
		add_long(blr, 0);
		add_byte(blr, blr_parameter);	// PP (hiPP, parameter 1 of in message)
		add_byte(blr, m_inMgsNum);
		add_word(blr, 1);
	}

	// rse end
	add_byte(blr, blr_end);

	// loop body: send one record with EOF flag = 1
	add_byte(blr, blr_send);
	add_byte(blr, m_outMgsNum);
	add_byte(blr, blr_begin);
	add_byte(blr, blr_assignment);
	add_byte(blr, blr_literal);
	add_byte(blr, blr_short);
	add_byte(blr, 0);
	add_word(blr, 1);
	add_byte(blr, blr_parameter);
	add_byte(blr, m_outMgsNum);
	add_word(blr, eof_parameter);

	// assign every field (with its null flag) to the out message
	for (field = relation->rel_fields; field; field = field->fld_next)
	{
		if (field->fld_flags & FLD_computed)
			continue;

		add_byte(blr, blr_assignment);
		add_byte(blr, blr_fid);
		add_byte(blr, 0);	// context
		add_word(blr, field->fld_id);
		add_byte(blr, blr_parameter2);
		add_byte(blr, m_outMgsNum);	// message number
		add_word(blr, field->fld_parameter);
		add_word(blr, field->fld_missing_parameter);
	}

	add_byte(blr, blr_end);

	// eof handler: send a final message with EOF flag = 0
	add_byte(blr, blr_send);
	add_byte(blr, m_outMgsNum);
	add_byte(blr, blr_assignment);
	add_byte(blr, blr_literal);
	add_byte(blr, blr_short);
	add_byte(blr, 0);
	add_word(blr, 0);
	add_byte(blr, blr_parameter);
	add_byte(blr, m_outMgsNum);
	add_word(blr, eof_parameter);

	add_byte(blr, blr_end);
	add_byte(blr, blr_eoc);

	// shrink the buffer to the BLR actually generated
	const FB_SIZE_T blr_length = blr - blr_buffer;
	m_blr.shrink(blr_length);

#ifdef DEBUG
	if (debug_on)
		fb_print_blr(blr_buffer, blr_length, NULL, NULL, 0);
#endif
}
// Reset all cached metadata so the object can be reused for another relation.
void ReadRelationMeta::clear()
{
	m_blr.clear();
	m_relation = NULL;
	m_fldCount = 0;
	m_inMgsNum = 0;
	m_outMgsNum = 0;
	m_outMsgLen = 0;
	m_outRecLen = 0;
	m_outEofOffset = 0;
}
/// class ReadRelationReq
// (Re)bind this request object to the given metadata: compile its BLR
// against the database and allocate the output message buffer.
// Passing the metadata it is already bound to is a no-op.
void ReadRelationReq::reset(const ReadRelationMeta* meta)
{
	// already bound to the same metadata and relation - nothing to do
	if (m_meta == meta && meta != NULL && m_relation == meta->m_relation)
		return;
	if (m_meta)
		clear();
	m_meta = meta;
	if (m_meta)
	{
		m_relation = m_meta->m_relation;
		// tdgbl is referenced by the gpre DB macro below - TODO confirm
		BurpGlobals* tdgbl = BurpGlobals::getSpecific();
		FbLocalStatus status;
		compile(&status, DB);
		if (status->getState() & IStatus::STATE_ERRORS)
		{
			BURP_error_redirect(&status, 27);
			// msg 27 isc_compile_request failed
			fb_print_blr(m_meta->m_blr.begin(), m_meta->m_blr.getCount(), NULL, NULL, 0);
		}
		// m_eof points at the EOF marker placed after the record data
		UCHAR* data = m_outMsg.getBuffer(m_meta->m_outMsgLen);
		m_eof = reinterpret_cast<SSHORT*> (data + m_meta->m_outEofOffset);
	}
	else
	{
		m_relation = NULL;
		m_request = 0;
		m_eof = NULL;
	}
	// forget any previously set PP bounds
	memset(&m_inMgs, 0, sizeof(m_inMgs));
}
// Detach from metadata and free the compiled request, if any.
// Note: clear() and release() call each other; the recursion stops once
// release() resets m_request to nullptr.
void ReadRelationReq::clear()
{
	m_relation = NULL;
	m_meta = NULL;
	m_eof = NULL;
	m_outMsg.clear();
	if (m_request)
	{
		// errors from release are intentionally ignored here
		FbLocalStatus status;
		release(&status);
	}
}
// Compile the BLR prepared by ReadRelationMeta into an engine request handle.
void ReadRelationReq::compile(CheckStatusWrapper* status, IAttachment* att)
{
	const auto& blr = m_meta->m_blr;
	m_request = att->compileRequest(status, blr.getCount(), blr.begin());
}
// Free the engine request handle, if any. On success the whole object is
// returned to its pristine state via clear() (the mutual recursion with
// clear() terminates because m_request is reset to nullptr first).
void ReadRelationReq::release(CheckStatusWrapper* status)
{
	if (m_request)
	{
		m_request->free(status);
		if (!(status->getState() & IStatus::STATE_ERRORS))
		{
			m_request = nullptr;
			clear();
		}
	}
}
// Remember the pointer-page window [loPP, hiPP) to be sent with the next
// start() of a partitioned scan.
void ReadRelationReq::setParams(ULONG loPP, ULONG hiPP)
{
	m_inMgs.hiPP = hiPP;
	m_inMgs.loPP = loPP;
}
// Start the compiled request; a partitioned request also sends the
// previously stored PP bounds as its input message.
void ReadRelationReq::start(CheckStatusWrapper* status, ITransaction* tran)
{
	if (!m_meta->haveInputs())
	{
		m_request->start(status, tran, 0);
		return;
	}

	m_request->startAndSend(status, tran, 0, m_meta->m_inMgsNum, sizeof(m_inMgs), &m_inMgs);
}
// Fetch the next output message (record data, null flags and EOF marker)
// into m_outMsg.
void ReadRelationReq::receive(CheckStatusWrapper* status)
{
	const ReadRelationMeta* const meta = m_meta;
	m_request->receive(status, 0, meta->m_outMgsNum, meta->m_outMsgLen, m_outMsg.begin());
}
/// class BackupRelationTask
// Worker-side handler: reads all records of one pointer page of the
// relation and feeds them through put_data() into the backup stream,
// then returns the item's buffer to the task.
bool BackupRelationTask::tableReader(Item& item)
{
	item.m_request.reset(&m_metadata);
	// scan the single pointer page [ppSequence, ppSequence + 1)
	item.m_request.setParams(item.m_ppSequence, item.m_ppSequence + 1);
	put_data(item.m_relation, &item.m_request);
	item.getBackupTask()->releaseBuffer(item);
	return true;
}
} // namespace Firebird

View File

@ -61,6 +61,7 @@
#include "../common/os/os_utils.h"
#include "../burp/burpswi.h"
#include "../common/db_alias.h"
#include "../burp/BurpTasks.h"
#ifdef HAVE_CTYPE_H
#include <ctype.h>
@ -83,8 +84,8 @@
#include <sys/file.h>
#endif
using namespace Firebird;
using MsgFormat::SafeArg;
using Firebird::FbLocalStatus;
const char* fopen_write_type = "w";
const char* fopen_read_type = "r";
@ -596,6 +597,7 @@ int gbak(Firebird::UtilSvc* uSvc)
tdgbl->gbl_sw_old_descriptions = false;
tdgbl->gbl_sw_mode = false;
tdgbl->gbl_sw_skip_count = 0;
tdgbl->gbl_sw_par_workers = 1;
tdgbl->action = NULL;
burp_fil* file = NULL;
@ -868,6 +870,19 @@ int gbak(Firebird::UtilSvc* uSvc)
// skip a service specification
in_sw_tab->in_sw_state = false;
break;
case IN_SW_BURP_PARALLEL_WORKERS:
if (++itr >= argc)
{
BURP_error(407, true);
// msg 407 parallel workers parameter missing
}
tdgbl->gbl_sw_par_workers = get_number(argv[itr]);
if (tdgbl->gbl_sw_par_workers <= 0)
{
BURP_error(408, true, argv[itr]);
// msg 408 expected parallel workers, encountered "%s"
}
break;
case IN_SW_BURP_Y:
{
// want to do output redirect handling now instead of waiting
@ -965,6 +980,11 @@ int gbak(Firebird::UtilSvc* uSvc)
BURP_error(334, true, SafeArg() << in_sw_tab->in_sw_name);
tdgbl->gbl_sw_convert_ext_tables = true;
break;
case IN_SW_BURP_DIRECT_IO:
if (tdgbl->gbl_sw_direct_io)
BURP_error(334, true, SafeArg() << in_sw_tab->in_sw_name);
tdgbl->gbl_sw_direct_io = true;
break;
case IN_SW_BURP_E:
if (!tdgbl->gbl_sw_compress)
BURP_error(334, true, SafeArg() << in_sw_tab->in_sw_name);
@ -1377,9 +1397,16 @@ int gbak(Firebird::UtilSvc* uSvc)
action = open_files(file1, &file2, sw_replace, dpb);
if (tdgbl->stdIoMode && tdgbl->uSvc->isService())
tdgbl->gbl_sw_direct_io = false;
if (tdgbl->gbl_sw_direct_io && tdgbl->gbl_sw_blk_factor <= 1)
tdgbl->io_buffer_size = GBAK_DIRECT_IO_BUFFER_SIZE;
MVOL_init(tdgbl->io_buffer_size);
int result;
tdgbl->gbl_dpb_data.add(dpb.getBuffer(), dpb.getBufferLength());
tdgbl->uSvc->started();
switch (action)
@ -1490,7 +1517,9 @@ void BURP_abort()
* Abandon a failed operation.
*
**************************************/
BurpGlobals* tdgbl = BurpGlobals::getSpecific();
BurpMaster master;
BurpGlobals* tdgbl = master.get();
USHORT code = tdgbl->action && tdgbl->action->act_action == ACT_backup_fini ? 351 : 83;
// msg 351 Error closing database, but backup file is OK
// msg 83 Exiting before completion due to errors
@ -1517,7 +1546,8 @@ void BURP_error(USHORT errcode, bool abort, const SafeArg& arg)
* Functional description
*
**************************************/
BurpGlobals* tdgbl = BurpGlobals::getSpecific();
BurpMaster master;
BurpGlobals* tdgbl = master.get();
tdgbl->uSvc->setServiceStatus(burp_msg_fac, errcode, arg);
tdgbl->uSvc->started();
@ -1564,6 +1594,7 @@ void BURP_error_redirect(Firebird::IStatus* status_vector, USHORT errcode, const
* Issue error message. Output messages then abort.
*
**************************************/
BurpMaster master;
BURP_print_status(true, status_vector);
BURP_error(errcode, true, arg);
@ -1672,6 +1703,7 @@ void BURP_print(bool err, USHORT number, const SafeArg& arg)
* will accept.
*
**************************************/
BurpMaster master;
BURP_msg_partial(err, 169); // msg 169: gbak:
BURP_msg_put(err, number, arg);
@ -1692,6 +1724,7 @@ void BURP_print(bool err, USHORT number, const char* str)
* will accept.
*
**************************************/
BurpMaster master;
static const SafeArg dummy;
BURP_msg_partial(err, 169, dummy); // msg 169: gbak:
@ -1714,11 +1747,13 @@ void BURP_print_status(bool err, Firebird::IStatus* status_vector)
**************************************/
if (status_vector)
{
BurpMaster master;
BurpGlobals* tdgbl = master.get();
const ISC_STATUS* vector = status_vector->getErrors();
if (err)
{
BurpGlobals* tdgbl = BurpGlobals::getSpecific();
tdgbl->uSvc->setServiceStatus(vector);
tdgbl->uSvc->started();
@ -1759,6 +1794,9 @@ void BURP_print_warning(Firebird::IStatus* status)
**************************************/
if (status && (status->getState() & Firebird::IStatus::STATE_WARNINGS))
{
BurpMaster master;
BurpGlobals* tdgbl = master.get();
// print the warning message
const ISC_STATUS* vector = status->getWarnings();
SCHAR s[1024];
@ -1791,7 +1829,8 @@ void BURP_verbose(USHORT number, const SafeArg& arg)
* If not verbose then calls yielding function.
*
**************************************/
BurpGlobals* tdgbl = BurpGlobals::getSpecific();
BurpMaster master;
BurpGlobals* tdgbl = master.get();
if (tdgbl->gbl_sw_verbose)
BURP_message(number, arg, true);
@ -1812,7 +1851,8 @@ void BURP_message(USHORT number, const MsgFormat::SafeArg& arg, bool totals)
* Calls BURP_msg for formatting & displaying a message.
*
**************************************/
BurpGlobals* tdgbl = BurpGlobals::getSpecific();
BurpMaster master;
BurpGlobals* tdgbl = master.get();
if (totals)
tdgbl->print_stats_header();
@ -2116,7 +2156,8 @@ static gbak_action open_files(const TEXT* file1,
#ifdef WIN_NT
if ((fil->fil_fd = NT_tape_open(nm.c_str(), MODE_WRITE, CREATE_ALWAYS)) == INVALID_HANDLE_VALUE)
#else
if ((fil->fil_fd = os_utils::open(nm.c_str(), MODE_WRITE, open_mask)) == -1)
const int wmode = MODE_WRITE | (tdgbl->gbl_sw_direct_io ? O_DIRECT : 0);
if ((fil->fil_fd = open(fil->fil_name.c_str(), wmode, open_mask)) == -1)
#endif // WIN_NT
{
@ -2427,7 +2468,8 @@ static void burp_output(bool err, const SCHAR* format, ...)
**************************************/
va_list arglist;
BurpGlobals* tdgbl = BurpGlobals::getSpecific();
BurpMaster master;
BurpGlobals* tdgbl = master.get();
if (tdgbl->sw_redirect != NOOUTPUT && format[0] != '\0')
{
@ -2709,7 +2751,10 @@ bool BurpGlobals::skipRelation(const char* name)
{ false, false, true} // NM p
};
return result[checkPattern(skipDataMatcher, name)][checkPattern(includeDataMatcher, name)];
const enum Pattern res1 = checkPattern(skipDataMatcher, name);
const enum Pattern res2 = checkPattern(includeDataMatcher, name);
return result[res1][res2];
}
void BurpGlobals::read_stats(SINT64* stats)

View File

@ -745,6 +745,7 @@ struct burp_rel
SSHORT rel_name_length;
GDS_NAME rel_name;
GDS_NAME rel_owner; // relation owner, if not us
ULONG rel_max_pp; // max pointer page sequence number
};
enum burp_rel_flags_vals {
@ -954,6 +955,10 @@ public:
: ThreadData(ThreadData::tddGBL),
GblPool(us->isService()),
defaultCollations(getPool()),
gbl_dpb_data(*getDefaultMemoryPool()),
master(true),
taskItem(NULL),
gbl_sw_par_workers(1),
uSvc(us),
verboseInterval(10000),
flag_on_line(true),
@ -1001,6 +1006,7 @@ public:
bool gbl_sw_mode;
bool gbl_sw_mode_val;
bool gbl_sw_overwrite;
bool gbl_sw_direct_io;
bool gbl_sw_zip;
const SCHAR* gbl_sw_keyholder;
const SCHAR* gbl_sw_crypt;
@ -1014,6 +1020,7 @@ public:
SLONG gbl_sw_page_buffers;
burp_fil* gbl_sw_files;
burp_fil* gbl_sw_backup_files;
int gbl_sw_par_workers;
gfld* gbl_global_fields;
unsigned gbl_network_protocol;
burp_act* action;
@ -1068,6 +1075,7 @@ public:
FB_UINT64 mvol_cumul_count;
UCHAR* mvol_io_ptr;
int mvol_io_cnt;
UCHAR* mvol_io_memory; // as allocated, not aligned pointer
UCHAR* mvol_io_buffer;
UCHAR* mvol_io_volume;
UCHAR* mvol_io_header;
@ -1084,6 +1092,7 @@ public:
Firebird::IAttachment* db_handle;
Firebird::ITransaction* tr_handle;
Firebird::ITransaction* global_trans;
TraNumber tr_snapshot;
DESC file_desc;
int exit_code;
UCHAR* head_of_mem_list;
@ -1147,6 +1156,7 @@ public:
Firebird::IRequest* handles_put_index_req_handle7;
Firebird::IRequest* handles_put_relation_req_handle1;
Firebird::IRequest* handles_put_relation_req_handle2;
Firebird::IRequest* handles_put_relation_req_handle3;
Firebird::IRequest* handles_store_blr_gen_id_req_handle1;
Firebird::IRequest* handles_write_function_args_req_handle1;
Firebird::IRequest* handles_write_function_args_req_handle2;
@ -1181,7 +1191,10 @@ public:
Firebird::Array<Firebird::Pair<Firebird::NonPooled<Firebird::MetaString, Firebird::MetaString> > >
defaultCollations;
Firebird::Array<UCHAR> gbl_dpb_data;
Firebird::UtilSvc* uSvc;
bool master; // set for master thread only
void* taskItem; // current task item, if any
ULONG verboseInterval; // How many records should be backed up or restored before we show this message
bool flag_on_line; // indicates whether we will bring the database on-line
bool firstMap; // this is the first time we entered get_mapping()

View File

@ -99,6 +99,9 @@ const int IN_SW_BURP_CRYPT = 51; // name of crypt plugin
const int IN_SW_BURP_INCLUDE_DATA = 52; // backup data from tables
const int IN_SW_BURP_REPLICA = 53; // replica mode
const int IN_SW_BURP_PARALLEL_WORKERS = 54; // parallel workers
const int IN_SW_BURP_DIRECT_IO = 55; // direct IO for backup files
/**************************************************************************/
static const char* const BURP_SW_MODE_NONE = "NONE";
@ -119,6 +122,8 @@ static const Switches::in_sw_tab_t reference_burp_in_sw_table[] =
// msg 73: @1CREATE_DATABASE create database from backup file
{IN_SW_BURP_CO, isc_spb_bkp_convert, "CONVERT", 0, 0, 0, false, true, 254, 2, NULL, boBackup},
// msg 254: @1CO(NVERT) backup external files as tables
{IN_SW_BURP_DIRECT_IO, isc_spb_bkp_direct_io,"DIRECT_IO", 0, 0, 0, false, true, 409, 1, NULL, boBackup},
// msg 409: @1D(IRECT_IO) direct IO for backup file(s)
{IN_SW_BURP_CRYPT, isc_spb_bkp_crypt, "CRYPT", 0, 0, 0, false, false, 373, 3, NULL, boGeneral},
// msg 373:@1CRY(PT) plugin name
{IN_SW_BURP_E, isc_spb_bkp_expand, "EXPAND", 0, 0, 0, false, true, 97, 1, NULL, boBackup},
@ -164,6 +169,8 @@ static const Switches::in_sw_tab_t reference_burp_in_sw_table[] =
// msg 186: @1OLD_DESCRIPTIONS save old style metadata descriptions
{IN_SW_BURP_P, isc_spb_res_page_size, "PAGE_SIZE", 0, 0, 0, false, false, 101, 1, NULL, boRestore},
// msg 101: @1PAGE_SIZE override default page size
{IN_SW_BURP_PARALLEL_WORKERS, isc_spb_bkp_parallel_workers, "PARALLEL", 0, 0, 0, false, false, 406, 3, NULL, boGeneral},
// msg 406: @1PAR(ALLEL) parallel workers
{IN_SW_BURP_PASS, 0, "PASSWORD", 0, 0, 0, false, false, 190, 3, NULL, boGeneral},
// msg 190: @1PA(SSWORD) Firebird password
{IN_SW_BURP_RECREATE, 0, "RECREATE_DATABASE", 0, 0, 0, false, false, 284, 1, NULL, boMain},

View File

@ -43,6 +43,7 @@
#include "../burp/burp_proto.h"
#include "../burp/mvol_proto.h"
#include "../burp/split/spit.h"
#include "../burp/BurpTasks.h"
#include "../yvalve/gds_proto.h"
#include "../common/gdsassert.h"
#include "../common/os/os_utils.h"
@ -638,7 +639,8 @@ FB_UINT64 mvol_fini_write(BurpGlobals* tdgbl, int* io_cnt, UCHAR** io_ptr)
}
tdgbl->file_desc = INVALID_HANDLE_VALUE;
BURP_free(tdgbl->mvol_io_header);
BURP_free(tdgbl->mvol_io_memory);
tdgbl->mvol_io_memory = NULL;
tdgbl->mvol_io_header = NULL;
tdgbl->mvol_io_buffer = NULL;
tdgbl->blk_io_cnt = 0;
@ -803,7 +805,9 @@ void mvol_init_write(BurpGlobals* tdgbl, const char* file_name, int* cnt, UCHAR*
tdgbl->mvol_actual_buffer_size = tdgbl->mvol_io_buffer_size;
const ULONG temp_buffer_size = tdgbl->mvol_io_buffer_size * tdgbl->gbl_sw_blk_factor;
tdgbl->mvol_io_ptr = tdgbl->mvol_io_buffer = BURP_alloc(temp_buffer_size + MAX_HEADER_SIZE);
tdgbl->mvol_io_memory = BURP_alloc(temp_buffer_size + MAX_HEADER_SIZE * 2);
tdgbl->mvol_io_ptr = tdgbl->mvol_io_buffer =
(UCHAR*) FB_ALIGN((U_IPTR) tdgbl->mvol_io_memory, MAX_HEADER_SIZE);
tdgbl->mvol_io_cnt = tdgbl->mvol_actual_buffer_size;
while (!write_header(tdgbl->file_desc, temp_buffer_size, false))
@ -830,6 +834,14 @@ void mvol_init_write(BurpGlobals* tdgbl, const char* file_name, int* cnt, UCHAR*
void MVOL_read(BurpGlobals* tdgbl)
{
// Setup our pointer
if (!tdgbl->master)
{
// hvlad: it will throw ExcReadDone exception when there is nothing to read
Firebird::RestoreRelationTask::renewBuffer(tdgbl);
tdgbl->mvol_io_ptr = tdgbl->mvol_io_buffer;
return;
}
tdgbl->gbl_io_ptr = tdgbl->gbl_compress_buffer;
tdgbl->gbl_io_cnt = unzip_read_block(tdgbl, tdgbl->gbl_io_ptr, ZC_BUFSIZE);
}
@ -875,6 +887,8 @@ static void os_read(int* cnt, UCHAR** ptr)
{
BurpGlobals* tdgbl = BurpGlobals::getSpecific();
fb_assert(tdgbl->master);
for (;;)
{
tdgbl->mvol_io_cnt = read(tdgbl->file_desc, tdgbl->mvol_io_buffer, tdgbl->mvol_io_buffer_size);
@ -918,6 +932,7 @@ static void os_read(int* cnt, UCHAR** ptr)
{
BurpGlobals* tdgbl = BurpGlobals::getSpecific();
fb_assert(tdgbl->master);
fb_assert(tdgbl->blk_io_cnt <= 0);
for (;;)
@ -1025,11 +1040,13 @@ DESC NT_tape_open(const char* name, ULONG mode, ULONG create)
BurpGlobals* tdgbl = BurpGlobals::getSpecific();
const DWORD flags = (mode == MODE_WRITE && tdgbl->gbl_sw_direct_io) ? FILE_FLAG_NO_BUFFERING : 0;
if (strnicmp(name, "\\\\.\\tape", 8))
{
handle = CreateFile(name, mode,
mode == MODE_WRITE ? 0 : FILE_SHARE_READ,
NULL, create, FILE_ATTRIBUTE_NORMAL, NULL);
NULL, create, FILE_ATTRIBUTE_NORMAL | flags, NULL);
}
else
{
@ -1048,7 +1065,7 @@ DESC NT_tape_open(const char* name, ULONG mode, ULONG create)
//
handle = CreateFile(name, mode | MODE_READ,
mode == MODE_WRITE ? FILE_SHARE_WRITE : FILE_SHARE_READ,
0, OPEN_EXISTING, 0, NULL);
NULL, OPEN_EXISTING, flags, NULL);
if (handle != INVALID_HANDLE_VALUE)
{
// emulate UNIX rewinding the tape on open:
@ -1082,6 +1099,12 @@ DESC NT_tape_open(const char* name, ULONG mode, ULONG create)
//
void MVOL_write(BurpGlobals* tdgbl)
{
if (!tdgbl->master)
{
Firebird::BackupRelationTask::renewBuffer(tdgbl);
return;
}
fb_assert(tdgbl->gbl_io_ptr >= tdgbl->gbl_compress_buffer);
fb_assert(tdgbl->gbl_io_ptr <= tdgbl->gbl_compress_buffer + ZC_BUFSIZE);
@ -1098,6 +1121,14 @@ UCHAR mvol_write(const UCHAR c, int* io_cnt, UCHAR** io_ptr)
BurpGlobals* tdgbl = BurpGlobals::getSpecific();
if (!tdgbl->master)
{
Firebird::BackupRelationTask::renewBuffer(tdgbl);
*(*io_ptr)++ = c;
(*io_cnt)--;
return c;
}
const ULONG size_to_write = BURP_UP_TO_BLOCK(*io_ptr - tdgbl->mvol_io_buffer);
FB_UINT64 left = size_to_write;
@ -1317,10 +1348,17 @@ const UCHAR* MVOL_write_block(BurpGlobals* tdgbl, const UCHAR* ptr, ULONG count)
// If buffer full, write it
if (tdgbl->gbl_io_cnt <= 0)
{
zip_write_block(tdgbl, tdgbl->gbl_compress_buffer, tdgbl->gbl_io_ptr - tdgbl->gbl_compress_buffer, false);
if (!tdgbl->master)
{
Firebird::BackupRelationTask::renewBuffer(tdgbl);
}
else
{
zip_write_block(tdgbl, tdgbl->gbl_compress_buffer, tdgbl->gbl_io_ptr - tdgbl->gbl_compress_buffer, false);
tdgbl->gbl_io_ptr = tdgbl->gbl_compress_buffer;
tdgbl->gbl_io_cnt = ZC_BUFSIZE;
tdgbl->gbl_io_ptr = tdgbl->gbl_compress_buffer;
tdgbl->gbl_io_cnt = ZC_BUFSIZE;
}
}
const ULONG n = MIN(count, (ULONG) tdgbl->gbl_io_cnt);
@ -1510,7 +1548,11 @@ static DESC next_volume( DESC handle, ULONG mode, bool full_buffer)
new_desc = NT_tape_open(new_file, mode, OPEN_ALWAYS);
if (new_desc == INVALID_HANDLE_VALUE)
#else
new_desc = os_utils::open(new_file, mode, open_mask);
ULONG mode2 = mode;
if (mode == MODE_WRITE && tdgbl->gbl_sw_direct_io)
mode2 |= O_DIRECT;
new_desc = open(new_file, mode2, open_mask);
if (new_desc < 0)
#endif // WIN_NT
{
@ -2001,7 +2043,8 @@ static bool write_header(DESC handle, ULONG backup_buffer_size, bool full_buffer
put(tdgbl, att_end);
tdgbl->mvol_io_data = tdgbl->mvol_io_ptr;
tdgbl->mvol_io_data = (UCHAR*) FB_ALIGN((U_IPTR) tdgbl->mvol_io_ptr, MAX_HEADER_SIZE);
fb_assert(tdgbl->mvol_io_data == tdgbl->mvol_io_header + MAX_HEADER_SIZE);
}
else
{

File diff suppressed because it is too large Load Diff

View File

@ -34,6 +34,7 @@ const int K_BYTES = 1024;
const int IO_BUFFER_SIZE = (16 * K_BYTES);
const int SVC_IO_BUFFER_SIZE = (16 * (IO_BUFFER_SIZE));
const int GBAK_IO_BUFFER_SIZE = SVC_IO_BUFFER_SIZE;
const int GBAK_DIRECT_IO_BUFFER_SIZE = 64 * K_BYTES;
const int M_BYTES = (K_BYTES * K_BYTES);
const int G_BYTES = (K_BYTES * M_BYTES);

342
src/common/Task.cpp Normal file
View File

@ -0,0 +1,342 @@
/*
* PROGRAM: Firebird Database Engine
* MODULE: Task.cpp
* DESCRIPTION: Parallel task execution support
*
* The contents of this file are subject to the Initial
* Developer's Public License Version 1.0 (the "License");
* you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
* http://www.ibphoenix.com/main.nfs?a=ibphoenix&page=ibp_idpl.
*
* Software distributed under the License is distributed AS IS,
* WITHOUT WARRANTY OF ANY KIND, either express or implied.
* See the License for the specific language governing rights
* and limitations under the License.
*
* The Original Code was created by Khorsun Vladyslav
* for the Firebird Open Source RDBMS project.
*
* Copyright (c) 2019 Khorsun Vladyslav <hvlad@users.sourceforge.net>
* and all contributors signed below.
*
* All Rights Reserved.
* Contributor(s): ______________________________________.
*
*/
#include "../common/Task.h"
using namespace Firebird;
namespace Jrd {
/// class WorkerThread
// OS thread entry point: unwrap the WorkerThread instance and run its loop.
THREAD_ENTRY_DECLARE WorkerThread::workerThreadRoutine(THREAD_ENTRY_PARAM arg)
{
	const int rc = static_cast<WorkerThread*>(arg)->threadRoutine();
	return (THREAD_ENTRY_RETURN)(IPTR) rc;
}
// Create a WorkerThread object and start its OS thread. The new thread
// reports IDLE from threadRoutine() once it is up; callers synchronize
// with that via WaitForState(). Rethrows whatever Thread::start throws.
WorkerThread* WorkerThread::start(Coordinator* coordinator)
{
	WorkerThread* thd = new WorkerThread(coordinator);
	try
	{
		Thread::start(WorkerThread::workerThreadRoutine, thd, THREAD_medium, &thd->m_thdHandle);
	}
	catch (...)
	{
		// catch everything, not only status_exception, so the object is
		// never leaked when thread creation fails
		delete thd;
		throw;
	}
	return thd;
}
// Main loop of a worker thread: sleeps on m_waitSem until RunWorker()
// supplies a Worker (state RUNNING) or Shutdown() switches the state to
// STOPPING. Completion of a work cycle is reported via m_signalSem.
int WorkerThread::threadRoutine()
{
	// tell the starter (Coordinator::getThread) that the thread is up and idle
	m_state = IDLE;
	m_signalSem.release();

	while(m_state != STOPPING)
	{
		m_waitSem.enter();

		// run the worker handed over by RunWorker(), if any
		if (m_state == RUNNING && m_worker != NULL)
		{
			m_worker->Work(this);
			m_worker = NULL;
		}

		// back to idle unless a shutdown was requested meanwhile
		if (m_state == RUNNING)
		{
			m_state = IDLE;
			m_signalSem.release();
		}

		if (m_state == STOPPING)
			break;
	}
	return 0;
}
// Hand a worker over to this thread and wake it up.
// Must be called only while the thread is IDLE and has no worker.
void WorkerThread::RunWorker(Worker* worker)
{
	fb_assert(m_worker == NULL);
	fb_assert(m_state == IDLE);

	// set the worker before the state flip: threadRoutine checks both
	m_worker = worker;
	m_state = RUNNING;
	m_waitSem.release();	// wake threadRoutine()
}
// Wait until the thread reaches the given state.
// timeout >= 0: a single bounded wait on the signal semaphore, then give
//   up (units are those of Semaphore::tryEnter - presumably milliseconds,
//   TODO confirm); timeout < 0: wait indefinitely.
// Returns true when the requested state was reached.
bool WorkerThread::WaitForState(STATE state, int timeout)
{
	while (m_state != state) // || m_state == old_state - consume old signals ?
	{
		if (timeout >= 0)
		{
			// bounded: try once, re-check below, do not loop
			m_signalSem.tryEnter(0, timeout);
			break;
		}
		else
			m_signalSem.enter();
	}
	return (m_state == state);
}
// Ask the thread to stop; with wait == true also join it and mark it
// SHUTDOWN. No-op if the thread was already shut down.
void WorkerThread::Shutdown(bool wait)
{
	if (m_state == SHUTDOWN)
		return;

	m_state = STOPPING;
	m_waitSem.release();	// wake the loop so it observes STOPPING

	if (wait)
	{
		Thread::waitForCompletion(m_thdHandle);
		m_state = SHUTDOWN;
	}
}
/// class Worker
// Process work items of the assigned task until the task runs out of
// items, a handler reports failure, or the hosting thread is told to
// stop. thd is NULL when running synchronously on the caller's thread
// (see Coordinator::RunSync).
bool Worker::Work(WorkerThread* thd)
{
	fb_assert(m_state == READY);
	m_state = WORKING;
	m_thread = thd;
	Task::WorkItem* workItem = NULL;
	while (true)
	{
		// stop promptly if our thread was asked to shut down
		if (m_thread && m_thread->getState() != WorkerThread::RUNNING)
			break;
		if (!m_task->GetWorkItem(&workItem))
			break;		// no more items
		if (!m_task->Handler(*workItem))
			break;		// handler failed
	}
	m_thread = NULL;
	m_state = IDLE;
	return true;
}
// Wait (optionally bounded by timeout, see WorkerThread::WaitForState)
// until this worker becomes idle. Returns true if the worker is idle,
// false if it is busy but has no thread that could be waited upon.
bool Worker::WaitFor(int timeout)
{
	if (m_state == IDLE)
		return true;
	if (m_thread == NULL)
		return false;
	m_thread->WaitForState(WorkerThread::IDLE, timeout);
	return (m_state == IDLE);
}
/// class Coordinator
// Stop and destroy all pooled threads and workers. Every blocking call
// is made with m_mutex temporarily released so that finishing threads
// and workers can re-enter the coordinator (releaseThread/releaseWorker).
Coordinator::~Coordinator()
{
	MutexLockGuard guard(m_mutex, FB_FUNCTION);

	// first pass: ask all active threads to stop without waiting
	for (WorkerThread** p = m_activeThreads.begin(); p < m_activeThreads.end(); p++)
		(*p)->Shutdown(false);

	// join and delete the threads, active first, then idle
	while (!m_activeThreads.isEmpty())
	{
		WorkerThread* thd = m_activeThreads.pop();
		{
			MutexUnlockGuard unlock(m_mutex, FB_FUNCTION);
			thd->Shutdown(true);
		}
		delete thd;
	}
	while (!m_idleThreads.isEmpty())
	{
		WorkerThread* thd = m_idleThreads.pop();
		{
			MutexUnlockGuard unlock(m_mutex, FB_FUNCTION);
			thd->Shutdown(true);
		}
		delete thd;
	}

	// wait until workers still marked active go idle; they are moved to
	// the idle list by releaseWorker() - NOTE(review): WaitFor() itself
	// does not remove the worker from m_activeWorkers, confirm the loop
	// always terminates through RunSync's cleanup
	while (!m_activeWorkers.isEmpty())
	{
		Worker* w = m_activeWorkers.back();
		MutexUnlockGuard unlock(m_mutex, FB_FUNCTION);
		w->WaitFor(-1);
	}
	while (!m_idleWorkers.isEmpty())
	{
		Worker* w = m_idleWorkers.pop();
		delete w;
	}
}
// Run the task with up to task->GetMaxWorkers() workers: worker 0 runs
// synchronously on the calling thread, the others on threads taken from
// (or added to) the pool. Returns after all involved workers went idle
// and were returned to the pool.
void Coordinator::RunSync(Task* task)
{
	int cntWorkers = setupWorkers(task->GetMaxWorkers());
	if (cntWorkers < 1)
		return;

	HalfStaticArray<WorkerAndThd, 8> taskWorkers(*m_pool, cntWorkers);

	// worker 0 has no thread attached - it runs on the calling thread
	Worker* syncWorker = getWorker();
	taskWorkers.push(WorkerAndThd(syncWorker, NULL));

	for (int i = 1; i < cntWorkers; i++)
	{
		WorkerThread* thd = getThread();
		if (thd)
		{
			Worker* w = getWorker();
			taskWorkers.push(WorkerAndThd(w, thd));

			w->SetTask(task);
			thd->RunWorker(w);
		}
	}

	// run synchronously
	syncWorker->SetTask(task);
	syncWorker->Work(NULL);

	// wait for all workers; iterate the actual array size - fewer than
	// cntWorkers entries exist if some getThread() call above failed
	// (indexing up to cntWorkers could read past the end of the array)
	for (FB_SIZE_T i = 0; i < taskWorkers.getCount(); i++)
	{
		WorkerAndThd& wt = taskWorkers[i];
		if (wt.thread)
		{
			if (!wt.worker->Idle())
				wt.thread->WaitForState(WorkerThread::IDLE, -1);

			releaseThread(wt.thread);
		}

		releaseWorker(wt.worker);
	}
}
// Take a worker from the idle list and mark it active.
// Returns NULL when no idle worker is available.
Worker* Coordinator::getWorker()
{
	MutexLockGuard guard(m_mutex, FB_FUNCTION);

	if (m_idleWorkers.isEmpty())
		return NULL;

	Worker* const worker = m_idleWorkers.pop();
	m_activeWorkers.push(worker);
	return worker;
}
// Move a worker from the active list back to the idle list.
void Coordinator::releaseWorker(Worker* w)
{
	MutexLockGuard guard(m_mutex, FB_FUNCTION);

	FB_SIZE_T pos;
	const bool wasActive = m_activeWorkers.find(w, pos);
	if (wasActive)
	{
		m_activeWorkers.remove(pos);
		m_idleWorkers.push(w);
	}

	// in every case the worker must end up in the idle list
	fb_assert(m_idleWorkers.find(w, pos));
}
// Ensure at least 'count' Worker objects exist; newly created ones are
// placed into the idle list. Returns 'count' unchanged (see TODO: the
// value is not clamped here, callers rely on Task::GetMaxWorkers()
// being sane). NOTE(review): not mutex-protected - presumably only
// called before worker threads are running; confirm.
int Coordinator::setupWorkers(int count)
{
	// TODO adjust count
	for (int i = m_workers.getCount(); i < count; i++)
	{
		Worker* w = FB_NEW_POOL(*m_pool) Worker(this);
		m_workers.add(w);
		m_idleWorkers.push(w);
	}
	return count;
}
// Take an idle pooled thread, or start a new one and wait until it is
// up. The returned thread is IDLE and registered in m_activeThreads;
// NULL is returned when no thread could be obtained.
WorkerThread* Coordinator::getThread()
{
	MutexLockGuard guard(m_mutex, FB_FUNCTION);

	WorkerThread* thd = NULL;
	if (!m_idleThreads.isEmpty())
		thd = m_idleThreads.pop();
	else
	{
		thd = WorkerThread::start(this);
		if (thd)
			thd->WaitForState(WorkerThread::IDLE, -1);	// wait for startup
	}

	if (thd)
	{
		fb_assert(thd->getState() == WorkerThread::IDLE);
		m_activeThreads.push(thd);
	}

	return thd;
}
// Return a thread to the idle pool. Threads already stopping or shut
// down are left alone - they are not reusable.
void Coordinator::releaseThread(WorkerThread* thd)
{
	MutexLockGuard guard(m_mutex, FB_FUNCTION);

	const WorkerThread::STATE thdState = thd->getState();
	if (thdState != WorkerThread::IDLE)
	{
		fb_assert(thdState == WorkerThread::STOPPING || thdState == WorkerThread::SHUTDOWN);
		return;
	}

	FB_SIZE_T pos;
	if (m_activeThreads.find(thd, pos))
	{
		m_activeThreads.remove(pos);
		m_idleThreads.push(thd);
	}
	else
	{
		// should never happen: the thread was not registered as active;
		// still make sure it ends up in the idle list exactly once
		fb_assert(false);
		if (!m_idleThreads.find(thd, pos))
			m_idleThreads.push(thd);
	}
}
} // namespace Jrd

213
src/common/Task.h Normal file
View File

@ -0,0 +1,213 @@
/*
* PROGRAM: Firebird Database Engine
* MODULE: Task.h
* DESCRIPTION: Parallel task execution support
*
* The contents of this file are subject to the Initial
* Developer's Public License Version 1.0 (the "License");
* you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
* http://www.ibphoenix.com/main.nfs?a=ibphoenix&page=ibp_idpl.
*
* Software distributed under the License is distributed AS IS,
* WITHOUT WARRANTY OF ANY KIND, either express or implied.
* See the License for the specific language governing rights
* and limitations under the License.
*
* The Original Code was created by Khorsun Vladyslav
* for the Firebird Open Source RDBMS project.
*
* Copyright (c) 2019 Khorsun Vladyslav <hvlad@users.sourceforge.net>
* and all contributors signed below.
*
* All Rights Reserved.
* Contributor(s): ______________________________________.
*
*/
#ifndef JRD_TASK_H
#define JRD_TASK_H
#include "firebird.h"
#include "../common/classes/alloc.h"
#include "../common/classes/array.h"
#include "../common/classes/locks.h"
#include "../common/classes/semaphore.h"
#include "../common/ThreadStart.h"
namespace Jrd
{
class Task;
class Worker;
class Coordinator;
class WorkerThread;
// Task (probably big one), contains parameters, could break whole task by
// smaller items (WorkItem), handle items, track common running state, track
// results and error happens.
class Task
{
public:
	Task() {};
	virtual ~Task() {};

	// task item to handle; concrete tasks derive from it to carry
	// per-item state
	class WorkItem
	{
	public:
		WorkItem(Task* task) :
			m_task(task)
		{}

		virtual ~WorkItem() {}

		Task* m_task;	// owning task, set at construction
	};

	// task item handler; presumably returns false on failure/stop -- TODO confirm
	virtual bool Handler(WorkItem&) = 0;

	// Fetch the next item to process; returns false when no item is
	// available. NOTE(review): looks like it may be called by several
	// workers concurrently, so implementations should synchronize -- confirm.
	virtual bool GetWorkItem(WorkItem**) = 0;

	// Put the overall task outcome into the given status vector.
	virtual bool GetResult(Firebird::IStatus* status) = 0;

	// evaluate task complexity and recommend number of parallel workers
	virtual int GetMaxWorkers() { return 1; }
};
// Worker: handle work items, optionally uses separate thread
class Worker
{
public:
	Worker(Coordinator* coordinator) :
		m_coordinator(coordinator),
		m_thread(NULL),
		m_task(NULL),
		m_state(IDLE)
	{
	}

	virtual ~Worker() {}

	// Assign the task to work on; the worker becomes READY to run.
	void SetTask(Task* task)
	{
		m_task = task;
		m_state = READY;
	}

	// Process work items of the assigned task; 'thd' is the thread
	// executing this worker, or NULL when running synchronously
	// (see Coordinator::RunSync).
	bool Work(WorkerThread* thd);

	//void SignalStop();

	bool Idle() const { return m_state == IDLE; };

	// Wait until the worker finishes; timeout presumably in ms,
	// -1 meaning "wait forever" -- TODO confirm.
	bool WaitFor(int timeout = -1);

protected:
	enum STATE {IDLE, READY, WORKING};

	Coordinator* const m_coordinator;	// set in constructor, not changed
	WorkerThread* m_thread;				// thread currently running this worker, if any
	Task* m_task;						// task assigned by SetTask()
	STATE m_state;
};
// Accept Task(s) to handle, creates and assigns Workers to work on task(s),
// bind Workers to Threads, synchronize task completion and get results.
class Coordinator
{
public:
	Coordinator(Firebird::MemoryPool* pool) :
		m_pool(pool),
		m_workers(*m_pool),
		m_idleWorkers(*m_pool),
		m_activeWorkers(*m_pool),
		m_idleThreads(*m_pool),
		m_activeThreads(*m_pool)
	{}

	~Coordinator();

	// AddTask(Task)

	// Run the task to completion using the calling thread plus, when
	// available, additional worker threads; returns after all workers
	// have finished.
	void RunSync(Task*);

private:
	// Pairs a worker with the thread it runs on; thread may be NULL
	// (e.g. when no spare thread could be obtained).
	struct WorkerAndThd
	{
		WorkerAndThd() :
			worker(NULL),
			thread(NULL)
		{}

		WorkerAndThd(Worker* w, WorkerThread* t) :
			worker(w),
			thread(t)
		{}

		Worker* worker;
		WorkerThread* thread;
	};

	// determine how many workers needed, allocate max possible number
	// of workers, make it all idle, return number of allocated workers
	int setupWorkers(int count);
	Worker* getWorker();
	void releaseWorker(Worker*);

	WorkerThread* getThread();
	void releaseThread(WorkerThread*);

	Firebird::MemoryPool* m_pool;
	Firebird::Mutex m_mutex;	// guards the worker and thread lists below

	Firebird::HalfStaticArray<Worker*, 8> m_workers;		// all allocated workers
	Firebird::HalfStaticArray<Worker*, 8> m_idleWorkers;
	Firebird::HalfStaticArray<Worker*, 8> m_activeWorkers;

	// todo: move to thread pool
	Firebird::HalfStaticArray<WorkerThread*, 8> m_idleThreads;
	Firebird::HalfStaticArray<WorkerThread*, 8> m_activeThreads;
};
// Thread that executes one Worker at a time on behalf of a Coordinator.
class WorkerThread
{
public:
	// STARTING: object created, OS thread not yet ready
	// IDLE:     waiting for a worker to run
	// RUNNING:  executing a worker
	// STOPPING: asked to quit
	// SHUTDOWN: thread routine finished
	enum STATE {STARTING, IDLE, RUNNING, STOPPING, SHUTDOWN};

	~WorkerThread()
	{
		Shutdown(true);

#ifdef WIN_NT
		if (m_thdHandle != INVALID_HANDLE_VALUE)
			CloseHandle(m_thdHandle);
#endif
	}

	// Create an instance and start the underlying OS thread.
	static WorkerThread* start(Coordinator*);

	// Hand the given worker to this thread for execution.
	void RunWorker(Worker*);

	// Wait (up to 'timeout'; -1 appears to mean "forever") until the
	// thread reaches the given state.
	bool WaitForState(STATE state, int timeout);

	// Ask the thread to stop; optionally wait for its termination.
	void Shutdown(bool wait);

	STATE getState() const { return m_state; }

private:
	WorkerThread(Coordinator* coordinator) :
		m_coordinator(coordinator),
		m_worker(NULL),
		m_state(STARTING)
	{}

	static THREAD_ENTRY_DECLARE workerThreadRoutine(THREAD_ENTRY_PARAM);
	int threadRoutine();

	Coordinator* const m_coordinator;	// set in constructor, not changed
	Worker* m_worker;					// worker currently assigned to this thread
	Firebird::Semaphore m_waitSem;		// idle thread waits on this semaphore to start work or go out
	Firebird::Semaphore m_signalSem;	// semaphore is released when thread going idle
	STATE m_state;
	Thread::Handle m_thdHandle;			// OS thread handle (closed in destructor on Windows)
};
} // namespace Jrd
#endif // JRD_TASK_H

View File

@ -346,6 +346,7 @@ ClumpletReader::ClumpletType ClumpletReader::getClumpletType(UCHAR tag) const
return StringSpb;
case isc_spb_bkp_factor:
case isc_spb_bkp_length:
case isc_spb_bkp_parallel_workers:
case isc_spb_res_length:
case isc_spb_res_buffers:
case isc_spb_res_page_size:
@ -369,6 +370,7 @@ ClumpletReader::ClumpletType ClumpletReader::getClumpletType(UCHAR tag) const
case isc_spb_rpr_commit_trans:
case isc_spb_rpr_rollback_trans:
case isc_spb_rpr_recover_two_phase:
case isc_spb_rpr_par_workers:
return IntSpb;
case isc_spb_rpr_commit_trans_64:
case isc_spb_rpr_rollback_trans_64:

View File

@ -412,6 +412,12 @@ void Config::checkValues()
checkIntForLoBound(KEY_INLINE_SORT_THRESHOLD, 0, true);
checkIntForLoBound(KEY_MAX_STATEMENT_CACHE_SIZE, 0, true);
checkIntForLoBound(KEY_MAX_PARALLEL_WORKERS, 1, true);
checkIntForHiBound(KEY_MAX_PARALLEL_WORKERS, 64, false); // todo: detect number of available cores
checkIntForLoBound(KEY_PARALLEL_WORKERS, 1, true);
checkIntForHiBound(KEY_MAX_PARALLEL_WORKERS, values[KEY_MAX_PARALLEL_WORKERS].intVal, false);
}

View File

@ -190,6 +190,8 @@ enum ConfigKey
KEY_INLINE_SORT_THRESHOLD,
KEY_TEMP_PAGESPACE_DIR,
KEY_MAX_STATEMENT_CACHE_SIZE,
KEY_PARALLEL_WORKERS,
KEY_MAX_PARALLEL_WORKERS,
MAX_CONFIG_KEY // keep it last
};
@ -306,7 +308,9 @@ constexpr ConfigEntry entries[MAX_CONFIG_KEY] =
{TYPE_BOOLEAN, "UseFileSystemCache", false, true},
{TYPE_INTEGER, "InlineSortThreshold", false, 1000}, // bytes
{TYPE_STRING, "TempTableDirectory", false, ""},
{TYPE_INTEGER, "MaxStatementCacheSize", false, 2 * 1048576} // bytes
{TYPE_INTEGER, "MaxStatementCacheSize", false, 2 * 1048576}, // bytes
{TYPE_INTEGER, "ParallelWorkers", true, 1},
{TYPE_INTEGER, "MaxParallelWorkers", true, 1}
};
@ -633,6 +637,10 @@ public:
CONFIG_GET_PER_DB_STR(getTempPageSpaceDirectory, KEY_TEMP_PAGESPACE_DIR);
CONFIG_GET_PER_DB_INT(getMaxStatementCacheSize, KEY_MAX_STATEMENT_CACHE_SIZE);
CONFIG_GET_GLOBAL_INT(getParallelWorkers, KEY_PARALLEL_WORKERS);
CONFIG_GET_GLOBAL_INT(getMaxParallelWorkers, KEY_MAX_PARALLEL_WORKERS);
};
// Implementation of interface to access master configuration file

View File

@ -129,6 +129,8 @@
#define isc_dpb_decfloat_round 94
#define isc_dpb_decfloat_traps 95
#define isc_dpb_clear_map 96
#define isc_dpb_parallel_workers 100
#define isc_dpb_worker_attach 101
/**************************************************/
@ -421,6 +423,7 @@
#define isc_spb_bkp_keyname 17
#define isc_spb_bkp_crypt 18
#define isc_spb_bkp_include_data 19
#define isc_spb_bkp_parallel_workers 21
#define isc_spb_bkp_ignore_checksums 0x01
#define isc_spb_bkp_ignore_limbo 0x02
#define isc_spb_bkp_metadata_only 0x04
@ -431,6 +434,7 @@
#define isc_spb_bkp_expand 0x80
#define isc_spb_bkp_no_triggers 0x8000
#define isc_spb_bkp_zip 0x010000
#define isc_spb_bkp_direct_io 0x020000
/********************************************
* Parameters for isc_action_svc_properties *
@ -521,6 +525,7 @@
#define isc_spb_rpr_commit_trans_64 49
#define isc_spb_rpr_rollback_trans_64 50
#define isc_spb_rpr_recover_two_phase_64 51
#define isc_spb_rpr_par_workers 52
#define isc_spb_rpr_validate_db 0x01
#define isc_spb_rpr_sweep_db 0x02
@ -548,6 +553,7 @@
#define isc_spb_res_keyname isc_spb_bkp_keyname
#define isc_spb_res_crypt isc_spb_bkp_crypt
#define isc_spb_res_stat isc_spb_bkp_stat
#define isc_spb_res_parallel_workers isc_spb_bkp_parallel_workers
#define isc_spb_res_metadata_only isc_spb_bkp_metadata_only
#define isc_spb_res_deactivate_idx 0x0100
#define isc_spb_res_no_shadow 0x0200

View File

@ -401,3 +401,7 @@ FB_IMPL_MSG_NO_SYMBOL(GBAK, 402, "publication for table")
FB_IMPL_MSG_SYMBOL(GBAK, 403, gbak_opt_replica, " @1REPLICA <mode> \"none\", \"read_only\" or \"read_write\" replica mode")
FB_IMPL_MSG_SYMBOL(GBAK, 404, gbak_replica_req, "\"none\", \"read_only\" or \"read_write\" required")
FB_IMPL_MSG_NO_SYMBOL(GBAK, 405, "could not access batch parameters")
FB_IMPL_MSG_NO_SYMBOL(GBAK, 406, " @1PAR(ALLEL) parallel workers")
FB_IMPL_MSG_SYMBOL(GBAK, 407, gbak_missing_prl_wrks, "parallel workers parameter missing")
FB_IMPL_MSG_SYMBOL(GBAK, 408, gbak_inv_prl_wrks, "expected parallel workers, encountered \"@1\"")
FB_IMPL_MSG_NO_SYMBOL(GBAK, 409, " @1D(IRECT_IO) direct IO for backup file(s)")

View File

@ -133,3 +133,4 @@ FB_IMPL_MSG_SYMBOL(GFIX, 132, gfix_opt_role, " -role set SQL ro
FB_IMPL_MSG_SYMBOL(GFIX, 133, gfix_role_req, "SQL role name required")
FB_IMPL_MSG_SYMBOL(GFIX, 134, gfix_opt_repl, " -repl(ica) replica mode <none / read_only / read_write>")
FB_IMPL_MSG_SYMBOL(GFIX, 135, gfix_repl_mode_req, "replica mode (none / read_only / read_write) required")
FB_IMPL_MSG_SYMBOL(GFIX, 136, gfix_opt_parallel, " -par(allel) parallel workers <n> (-sweep)")

View File

@ -3856,6 +3856,8 @@ const
isc_dpb_decfloat_round = byte(94);
isc_dpb_decfloat_traps = byte(95);
isc_dpb_clear_map = byte(96);
isc_dpb_parallel_workers = byte(100);
isc_dpb_worker_attach = byte(101);
isc_dpb_address = byte(1);
isc_dpb_addr_protocol = byte(1);
isc_dpb_addr_endpoint = byte(2);
@ -4013,6 +4015,7 @@ const
isc_spb_bkp_keyname = byte(17);
isc_spb_bkp_crypt = byte(18);
isc_spb_bkp_include_data = byte(19);
isc_spb_bkp_parallel_workers = byte(21);
isc_spb_bkp_ignore_checksums = $01;
isc_spb_bkp_ignore_limbo = $02;
isc_spb_bkp_metadata_only = $04;
@ -4023,6 +4026,7 @@ const
isc_spb_bkp_expand = $80;
isc_spb_bkp_no_triggers = $8000;
isc_spb_bkp_zip = $010000;
isc_spb_bkp_direct_io = $020000;
isc_spb_prp_page_buffers = byte(5);
isc_spb_prp_sweep_interval = byte(6);
isc_spb_prp_shutdown_db = byte(7);
@ -4078,6 +4082,7 @@ const
isc_spb_rpr_commit_trans_64 = byte(49);
isc_spb_rpr_rollback_trans_64 = byte(50);
isc_spb_rpr_recover_two_phase_64 = byte(51);
isc_spb_rpr_par_workers = byte(52);
isc_spb_rpr_validate_db = $01;
isc_spb_rpr_sweep_db = $02;
isc_spb_rpr_mend_db = $04;

View File

@ -254,6 +254,7 @@ Jrd::Attachment::Attachment(MemoryPool* pool, Database* dbb, JProvider* provider
att_dest_bind(&att_bindings),
att_original_timezone(TimeZoneUtil::getSystemTimeZone()),
att_current_timezone(att_original_timezone),
att_parallel_workers(0),
att_repl_appliers(*pool),
att_utility(UTIL_NONE),
att_procedures(*pool),

View File

@ -166,6 +166,7 @@ const ULONG ATT_monitor_init = 0x100000L; // Attachment is registered in monito
const ULONG ATT_repl_reset = 0x200000L; // Replication set has been reset
const ULONG ATT_replicating = 0x400000L; // Replication is active
const ULONG ATT_resetting = 0x800000L; // Session reset is in progress
const ULONG ATT_worker = 0x1000000L; // Worker attachment, managed by the engine
const ULONG ATT_NO_CLEANUP = (ATT_no_cleanup | ATT_notify_gc);
@ -602,6 +603,7 @@ public:
CoercionArray* att_dest_bind;
USHORT att_original_timezone;
USHORT att_current_timezone;
int att_parallel_workers;
Firebird::RefPtr<Firebird::IReplicatedSession> att_replicator;
Firebird::AutoPtr<Replication::TableMatcher> att_repl_matcher;
@ -961,7 +963,7 @@ public:
}
}
private:
protected:
void destroy(Attachment* attachment);
// "public" interface for internal (system) attachment

View File

@ -0,0 +1,423 @@
/*
* PROGRAM: Firebird Database Engine
* MODULE: WorkerAttachment.cpp
* DESCRIPTION: Parallel task execution support
*
* The contents of this file are subject to the Initial
* Developer's Public License Version 1.0 (the "License");
* you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
* http://www.ibphoenix.com/main.nfs?a=ibphoenix&page=ibp_idpl.
*
* Software distributed under the License is distributed AS IS,
* WITHOUT WARRANTY OF ANY KIND, either express or implied.
* See the License for the specific language governing rights
* and limitations under the License.
*
* The Original Code was created by Khorsun Vladyslav
* for the Firebird Open Source RDBMS project.
*
* Copyright (c) 2019 Khorsun Vladyslav <hvlad@users.sourceforge.net>
* and all contributors signed below.
*
* All Rights Reserved.
* Contributor(s): ______________________________________.
*
*/
#include "../jrd/WorkerAttachment.h"
#include "../common/config/config.h"
#include "../common/isc_proto.h"
#include "../common/utils_proto.h"
#include "../common/StatusArg.h"
#include "../common/classes/ClumpletWriter.h"
#include "../jrd/jrd.h"
#include "../jrd/ini_proto.h"
#include "../jrd/lck_proto.h"
#include "../jrd/pag_proto.h"
#include "../jrd/tra_proto.h"
#include "../jrd/status.h"
using namespace Firebird;
namespace Jrd {
/// WrkStableAttachment
// Build a system worker attachment: assign the pseudo-user "<Worker>",
// bind the attachment to this stable part and run the engine
// initialization sequence under a background context.
WrkStableAttachment::WrkStableAttachment(FbStatusVector* status, Jrd::Attachment* attachment) :
	SysStableAttachment(attachment)
{
	UserId user;
	user.setUserName("<Worker>");
	// user.usr_flags = USR_owner;	// need owner privs ??

	attachment->att_user = FB_NEW_POOL(*attachment->att_pool) UserId(*attachment->att_pool, user);
	attachment->setStable(this);

	BackgroundContextHolder tdbb(attachment->att_database, attachment, status, FB_FUNCTION);

	// Lock owner, metadata caches, header page, attachment id, transactions.
	LCK_init(tdbb, LCK_OWNER_attachment);
	INI_init(tdbb);
	INI_init2(tdbb);
	PAG_header(tdbb, true);
	PAG_attachment_id(tdbb);
	TRA_init(attachment);

	initDone();
}
WrkStableAttachment::~WrkStableAttachment()
{
	// Make sure the underlying attachment is torn down before the
	// stable part goes away; fini() is a no-op if already done.
	fini();
}
WrkStableAttachment* WrkStableAttachment::create(FbStatusVector* status, Jrd::Database* dbb)
{
	// Build a raw attachment, mark it as an engine-managed worker and
	// wrap it into a stable attachment part. On failure the error goes
	// into 'status', the partially built attachment is destroyed and
	// NULL is returned.
	Attachment* att = NULL;
	try
	{
		att = Attachment::create(dbb, NULL);
		att->att_filename = dbb->dbb_filename;
		att->att_flags |= ATT_worker;

		return FB_NEW WrkStableAttachment(status, att);
	}
	catch (const Exception& ex)
	{
		ex.stuffException(status);
	}

	if (att)
		Attachment::destroy(att);

	return NULL;
}
void WrkStableAttachment::fini()
{
	Attachment* attachment = NULL;
	{
		// Tear down under the attachment sync to keep other users out.
		AttSyncLockGuard guard(*getSync(), FB_FUNCTION);

		attachment = getHandle();
		if (!attachment)
			return;		// already finalized

		Database* dbb = attachment->att_database;
		FbLocalStatus status_vector;
		BackgroundContextHolder tdbb(dbb, attachment, &status_vector, FB_FUNCTION);

		// Unregister from monitoring, then release locks and cached relations.
		Monitoring::cleanupAttachment(tdbb);
		attachment->releaseLocks(tdbb);
		LCK_fini(tdbb, LCK_OWNER_attachment);
		attachment->releaseRelations(tdbb);
	}

	// Destroy the attachment with its sync released.
	destroy(attachment);
}
/// class WorkerAttachment
GlobalPtr<Mutex> WorkerAttachment::m_mapMutex;
GlobalPtr<WorkerAttachment::MapDbIdToWorkAtts> WorkerAttachment::m_map;
bool WorkerAttachment::m_shutdown = false;
// Per-database registry entry: pools of idle and active worker
// attachments plus a count of regular user attachments.
WorkerAttachment::WorkerAttachment() :
	m_idleAtts(*getDefaultMemoryPool()),
	m_activeAtts(*getDefaultMemoryPool()),
	m_cntUserAtts(0)
{
}
void WorkerAttachment::incUserAtts(const PathName& dbname)
{
if (Config::getServerMode() == MODE_SUPER)
return;
WorkerAttachment* item = getByName(dbname);
if (item)
{
MutexLockGuard guard(item->m_mutex, FB_FUNCTION);
item->m_cntUserAtts++;
}
}
void WorkerAttachment::decUserAtts(const PathName& dbname)
{
if (Config::getServerMode() == MODE_SUPER)
return;
WorkerAttachment* item = getByName(dbname);
if (item)
{
bool tryClear = false;
{
MutexLockGuard guard(item->m_mutex, FB_FUNCTION);
item->m_cntUserAtts--;
tryClear = (item->m_cntUserAtts == 0 && item->m_activeAtts.isEmpty());
}
if (tryClear)
item->clear(true);
}
}
WorkerAttachment* WorkerAttachment::getByName(const PathName& dbname)
{
	// Find (or lazily create) the registry entry for the given database.
	// Returns NULL when the subsystem is shutting down.
	if (m_shutdown)
		return NULL;

	MutexLockGuard guard(m_mapMutex, FB_FUNCTION);

	// Re-check under the mutex: shutdown() sets the flag with it held.
	if (m_shutdown)
		return NULL;

	WorkerAttachment* ret = NULL;
	if (!m_map->get(dbname, ret))
	{
		// Use FB_NEW (pooled allocation) for consistency with the rest of
		// the engine; plain operator new bypasses Firebird's memory pools.
		ret = FB_NEW WorkerAttachment();
		m_map->put(dbname, ret);
	}

	return ret;
}
void WorkerAttachment::shutdown()
{
	// Cheap unsynchronized check first to avoid taking the mutex twice.
	if (m_shutdown)
		return;

	MutexLockGuard guard(m_mapMutex, FB_FUNCTION);

	// Re-check under the mutex: another thread may have done the work.
	if (m_shutdown)
		return;

	// From here on getByName() returns NULL - no new entries can appear.
	m_shutdown = true;

	MapDbIdToWorkAtts::Accessor acc(&m_map);
	if (!acc.getFirst())
		return;

	do
	{
		WorkerAttachment* item = acc.current()->second;

		// Detach idle attachments unconditionally (checkRefs == false)
		// and destroy the registry entry itself.
		item->clear(false);
		delete item;
	}
	while (acc.getNext());

	m_map->clear();
}
void WorkerAttachment::shutdownDbb(Database* dbb)
{
	// Only SuperServer keeps long-lived worker attachments that must be
	// released when the database itself shuts down.
	if (Config::getServerMode() != MODE_SUPER)
		return;

	MutexLockGuard guard(m_mapMutex, FB_FUNCTION);

	WorkerAttachment* item = NULL;
	if (m_map->get(dbb->dbb_filename, item))
		item->clear(false);
}
StableAttachmentPart* WorkerAttachment::getAttachment(FbStatusVector* status, Database* dbb)
{
	//?? Database::Checkout cout(dbb);

	// Preset the status with isc_shutdown so that every early NULL
	// return below reports a sensible error to the caller.
	Arg::Gds(isc_shutdown).copyTo(status);

	WorkerAttachment* item = getByName(dbb->dbb_filename);
	if (!item)
		return NULL;

	MutexLockGuard guard(item->m_mutex, FB_FUNCTION);

	if (m_shutdown)
		return NULL;

	FB_SIZE_T maxWorkers = Config::getMaxParallelWorkers();
	if (maxWorkers <= 0)
		maxWorkers = MAX_ULONG;		// treat non-positive setting as "unlimited"

	// First, try to reuse an idle worker attachment.
	StableAttachmentPart* sAtt = NULL;
	while (!item->m_idleAtts.isEmpty())
	{
		if (m_shutdown)
			return NULL;

		sAtt = item->m_idleAtts.pop();
		if (sAtt->getHandle())
			break;

		// idle worker attachment was unexpectedly deleted, clean up and try next one
		MutexUnlockGuard unlock(item->m_mutex, FB_FUNCTION);

		FbLocalStatus local;
		doDetach(&local, sAtt);
		sAtt = NULL;
	}

	// Nothing to reuse: create a new worker attachment unless the
	// configured limit is already reached.
	if (!sAtt)
	{
		if (item->m_activeAtts.getCount() >= maxWorkers)
		{
			(Arg::Gds(isc_random) << Arg::Str("Not enough free worker attachments")).copyTo(status);
			return NULL;
		}

		MutexUnlockGuard unlock(item->m_mutex, FB_FUNCTION);

		status->init();
		sAtt = doAttach(status, dbb);
		if (!sAtt)
		{
			// log error ?
			if (!m_shutdown)
				iscLogStatus("Failed to create worker attachment\n", status);
			return NULL;
		}
	}

	// Mark the attachment as in use, then register it as active.
	Attachment* att = NULL;
	{
		MutexUnlockGuard unlock(item->m_mutex, FB_FUNCTION);
		AttSyncLockGuard guard(*sAtt->getSync(), FB_FUNCTION);

		att = sAtt->getHandle();
		fb_assert(!att || (att->att_flags & ATT_worker));

		if (att)
			att->att_use_count++;
	}

	if (att)
		item->m_activeAtts.add(sAtt);

	return sAtt;
}
void WorkerAttachment::releaseAttachment(FbStatusVector* status, StableAttachmentPart* sAtt)
{
	status->init();

	WorkerAttachment* item = NULL;
	{
		// Drop the use count taken in getAttachment() while holding
		// the attachment sync.
		AttSyncLockGuard attGuard(*sAtt->getSync(), FB_FUNCTION);

		Attachment* att = sAtt->getHandle();
		if (!att)
			return;		// attachment already gone, nothing to release

		att->att_use_count--;
		item = getByName(att->att_database->dbb_filename);
	}

	// No registry entry (or global shutdown): the attachment cannot be
	// returned to the idle pool, detach it instead.
	bool detach = (m_shutdown || (item == NULL));
	bool tryClear = false;

	if (item)
	{
		MutexLockGuard guard(item->m_mutex, FB_FUNCTION);

		FB_SIZE_T pos;
		if (item->m_activeAtts.find(sAtt, pos))
			item->m_activeAtts.remove(pos);

		if (!m_shutdown)
		{
			item->m_idleAtts.push(sAtt);

			// Last active worker gone and no user attachments left:
			// the idle pool may be released (non-SuperServer only, below).
			tryClear = (item->m_cntUserAtts == 0 && item->m_activeAtts.isEmpty());
		}
	}

	if (detach)
		doDetach(status, sAtt);

	if (tryClear && (Config::getServerMode() != MODE_SUPER))
		item->clear(true);
}
void WorkerAttachment::clear(bool checkRefs)
{
	// Collect the idle attachments while holding the mutex, then detach
	// them with the mutex released.
	HalfStaticArray<Jrd::StableAttachmentPart*, 8> toDetach(*getDefaultMemoryPool());

	{
		MutexLockGuard guard(m_mutex, FB_FUNCTION);

		// When asked, bail out while the entry is still referenced.
		if (checkRefs && (m_cntUserAtts != 0 || !m_activeAtts.isEmpty()))
			return;

		toDetach.assign(m_idleAtts);
		m_idleAtts.clear();
		m_activeAtts.clear();	// should be released by regular JRD shutdown
	}

	FbLocalStatus status;
	while (toDetach.hasData())
		doDetach(&status, toDetach.pop());
}
StableAttachmentPart* WorkerAttachment::doAttach(FbStatusVector* status, Database* dbb)
{
	StableAttachmentPart* sAtt = NULL;

	if (Config::getServerMode() == MODE_SUPER)
		// SuperServer: create an in-process system attachment directly.
		sAtt = WrkStableAttachment::create(status, dbb);
	else
	{
		// Other modes: make a regular attachment through the provider,
		// marked as a worker via isc_dpb_worker_attach.
		ClumpletWriter dpb(ClumpletReader::Tagged, MAX_DPB_SIZE, isc_dpb_version1);
		dpb.insertString(isc_dpb_trusted_auth, DBA_USER_NAME);
		dpb.insertInt(isc_dpb_worker_attach, 1);

		AutoPlugin<JProvider> jInstance(JProvider::getInstance());
		//jInstance->setDbCryptCallback(&status, tdbb->getAttachment()->att_crypt_callback);

		JAttachment* jAtt = jInstance->attachDatabase(status, dbb->dbb_filename.c_str(),
			dpb.getBufferLength(), dpb.getBuffer());

		if (!(status->getState() & IStatus::STATE_ERRORS))
			sAtt = jAtt->getStable();
	}

	if (sAtt)
		sAtt->addRef();		// !! extra reference, released in doDetach()

	return sAtt;
}
void WorkerAttachment::doDetach(FbStatusVector* status, StableAttachmentPart* sAtt)
{
	status->init();

	// if (att->att_flags & ATT_system)
	if (Config::getServerMode() == MODE_SUPER)
	{
		// In SuperServer mode the attachment was made by
		// WrkStableAttachment::create(). Use static_cast for the
		// downcast: unlike reinterpret_cast, it adjusts the pointer
		// correctly even if the base sub-object is not at offset zero.
		WrkStableAttachment* wrk = static_cast<WrkStableAttachment*>(sAtt);
		wrk->fini();
	}
	else
	{
		// Regular attachment made through the provider: detach it.
		JAttachment* jAtt = sAtt->getInterface();
		jAtt->detach(status);
		jAtt->release();
	}

	sAtt->release();	// !! balances addRef() in doAttach()
}
} // namespace Jrd

112
src/jrd/WorkerAttachment.h Normal file
View File

@ -0,0 +1,112 @@
/*
* PROGRAM: Firebird Database Engine
* MODULE: WorkerAttachment.h
* DESCRIPTION: Parallel task execution support
*
* The contents of this file are subject to the Initial
* Developer's Public License Version 1.0 (the "License");
* you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
* http://www.ibphoenix.com/main.nfs?a=ibphoenix&page=ibp_idpl.
*
* Software distributed under the License is distributed AS IS,
* WITHOUT WARRANTY OF ANY KIND, either express or implied.
* See the License for the specific language governing rights
* and limitations under the License.
*
* The Original Code was created by Khorsun Vladyslav
* for the Firebird Open Source RDBMS project.
*
* Copyright (c) 2019 Khorsun Vladyslav <hvlad@users.sourceforge.net>
* and all contributors signed below.
*
* All Rights Reserved.
* Contributor(s): ______________________________________.
*
*/
#ifndef JRD_WORKER_ATTACHMENT_H
#define JRD_WORKER_ATTACHMENT_H
#include "firebird.h"
#include "../common/classes/alloc.h"
#include "../common/classes/array.h"
#include "../common/classes/fb_string.h"
#include "../common/classes/GenericMap.h"
#include "../common/classes/init.h"
#include "../common/classes/locks.h"
#include "../jrd/Attachment.h"
#include "../jrd/jrd.h"
#include "../jrd/status.h"
namespace Jrd
{
// Stable part of an in-process (SuperServer) worker attachment.
class WrkStableAttachment : public SysStableAttachment
{
public:
	// Create a fully initialized system worker attachment for the given
	// database; returns NULL and fills 'status' on failure.
	static WrkStableAttachment* create(FbStatusVector* status, Jrd::Database* dbb);

	// Release attachment resources and destroy the underlying attachment.
	void fini();

private:
	explicit WrkStableAttachment(FbStatusVector* status, Jrd::Attachment* att);
	virtual ~WrkStableAttachment();
};
// Establishes a database context and holds the attachment sync while a
// worker thread executes inside the engine.
class WorkerContextHolder : public Jrd::DatabaseContextHolder, public Jrd::Attachment::SyncGuard
{
public:
	WorkerContextHolder(thread_db* tdbb, const char* f) :
		DatabaseContextHolder(tdbb),
		Jrd::Attachment::SyncGuard(tdbb->getAttachment(), f)
	{
	}

private:
	// copying is prohibited
	WorkerContextHolder(const WorkerContextHolder&);
	WorkerContextHolder& operator=(const WorkerContextHolder&);
};
// Registry of worker attachments, keyed by database file name. Keeps a
// pool of idle worker attachments for reuse and tracks active ones.
class WorkerAttachment
{
public:
	explicit WorkerAttachment();

	// Get an idle (or freshly created) worker attachment for the database.
	static Jrd::StableAttachmentPart* getAttachment(FbStatusVector* status, Jrd::Database* dbb);

	// Return an attachment obtained by getAttachment() back to the pool.
	static void releaseAttachment(FbStatusVector* status, Jrd::StableAttachmentPart* sAtt);

	// Track the number of regular user attachments per database; used to
	// decide when cached worker attachments may be dropped.
	static void incUserAtts(const Firebird::PathName& dbname);
	static void decUserAtts(const Firebird::PathName& dbname);

	// Detach cached worker attachments: process-wide / for one database.
	static void shutdown();
	static void shutdownDbb(Jrd::Database* dbb);

private:
	static WorkerAttachment* getByName(const Firebird::PathName& dbname);
	static Jrd::StableAttachmentPart* doAttach(FbStatusVector* status, Jrd::Database* dbb);
	static void doDetach(FbStatusVector* status, Jrd::StableAttachmentPart* sAtt);
	void clear(bool checkRefs);

	typedef Firebird::GenericMap<Firebird::Pair<Firebird::Left<Firebird::PathName, WorkerAttachment*> > >
		MapDbIdToWorkAtts;

	static Firebird::GlobalPtr<Firebird::Mutex> m_mapMutex;		// guards m_map
	static Firebird::GlobalPtr<MapDbIdToWorkAtts> m_map;
	static bool m_shutdown;

	Firebird::Mutex m_mutex;	// guards the per-database members below
	Firebird::HalfStaticArray<Jrd::StableAttachmentPart*, 8> m_idleAtts;
	Firebird::SortedArray<Jrd::StableAttachmentPart*,
		Firebird::InlineStorage<Jrd::StableAttachmentPart*, 8> > m_activeAtts;
	int m_cntUserAtts;			// number of regular user attachments
};
} // namespace Jrd
#endif // JRD_WORKER_ATTACHMENT_H

View File

@ -3440,7 +3440,6 @@ static ULONG fast_load(thread_db* tdbb,
jrd_rel* const relation = creation.relation;
index_desc* const idx = creation.index;
const USHORT key_length = creation.key_length;
Sort* const scb = creation.sort;
const USHORT pageSpaceID = relation->getPages(tdbb)->rel_pg_space_id;
@ -3560,9 +3559,9 @@ static ULONG fast_load(thread_db* tdbb,
// Get the next record in sorted order.
UCHAR* record;
scb->get(tdbb, reinterpret_cast<ULONG**>(&record));
creation.sort->get(tdbb, reinterpret_cast<ULONG**>(&record));
if (!record || creation.duplicates)
if (!record || creation.duplicates.value())
break;
index_sort_record* isr = (index_sort_record*) (record + key_length);
@ -3780,7 +3779,7 @@ static ULONG fast_load(thread_db* tdbb,
++duplicates;
if (unique && primarySeen && isPrimary && !(isr->isr_flags & ISR_null))
{
creation.duplicates++;
++creation.duplicates;
creation.dup_recno = isr->isr_record_number;
}
@ -4153,8 +4152,7 @@ static ULONG fast_load(thread_db* tdbb,
tdbb->tdbb_flags &= ~TDBB_no_cache_unwind;
// do some final housekeeping
creation.sort.reset();
//creation.sort.reset();
// If index flush fails, try to delete the index tree.
// If the index delete fails, just go ahead and punt.

View File

@ -48,6 +48,8 @@ struct temporary_key;
class jrd_tra;
class BtrPageGCLock;
class Sort;
class PartitionedSort;
struct sort_key_def;
// Index descriptor block -- used to hold info from index root page
@ -278,11 +280,14 @@ struct IndexCreation
{
jrd_rel* relation;
index_desc* index;
const TEXT* index_name;
jrd_tra* transaction;
PartitionedSort* sort;
sort_key_def* key_desc;
USHORT key_length;
Firebird::AutoPtr<Sort> sort;
USHORT nullIndLen;
SINT64 dup_recno;
SLONG duplicates;
Firebird::AtomicCounter duplicates;
};
// Class used to report any index related errors

View File

@ -1651,7 +1651,7 @@ punt:
}
bool DPM_next(thread_db* tdbb, record_param* rpb, USHORT lock_type, bool onepage)
bool DPM_next(thread_db* tdbb, record_param* rpb, USHORT lock_type, FindNextRecordScope scope)
{
/**************************************
*
@ -1753,16 +1753,7 @@ bool DPM_next(thread_db* tdbb, record_param* rpb, USHORT lock_type, bool onepage
}
}
if (window->win_flags & WIN_large_scan)
CCH_RELEASE_TAIL(tdbb, window);
else if ((window->win_flags & WIN_garbage_collector) &&
(window->win_flags & WIN_garbage_collect))
{
CCH_RELEASE_TAIL(tdbb, window);
window->win_flags &= ~WIN_garbage_collect;
}
else
CCH_RELEASE(tdbb, window);
CCH_RELEASE(tdbb, window);
}
// Find the next pointer page, data page, and record
@ -1850,9 +1841,11 @@ bool DPM_next(thread_db* tdbb, record_param* rpb, USHORT lock_type, bool onepage
rpb->rpb_number.decrement();
check_swept(tdbb, rpb);
rpb->rpb_number = saveRecNo;
tdbb->checkCancelState();
}
if (onepage)
if (scope == DPM_next_data_page)
return false;
if (!(ppage = get_pointer_page(tdbb, rpb->rpb_relation, relPages, window,
@ -1862,7 +1855,7 @@ bool DPM_next(thread_db* tdbb, record_param* rpb, USHORT lock_type, bool onepage
}
}
if (onepage)
if (scope == DPM_next_data_page)
{
CCH_RELEASE(tdbb, window);
return false;
@ -1882,11 +1875,8 @@ bool DPM_next(thread_db* tdbb, record_param* rpb, USHORT lock_type, bool onepage
else
CCH_RELEASE(tdbb, window);
if (flags & ppg_eof || onepage)
if ((flags & ppg_eof) || (scope != DPM_next_all))
return false;
if (sweeper)
tdbb->checkCancelState();
}
}

View File

@ -26,6 +26,7 @@
#include "../jrd/RecordNumber.h"
#include "../jrd/sbm.h"
#include "../jrd/vio_proto.h"
// fwd. decl.
namespace Jrd
@ -68,7 +69,7 @@ void DPM_fetch_fragment(Jrd::thread_db*, Jrd::record_param*, USHORT);
SINT64 DPM_gen_id(Jrd::thread_db*, SLONG, bool, SINT64);
bool DPM_get(Jrd::thread_db*, Jrd::record_param*, SSHORT);
ULONG DPM_get_blob(Jrd::thread_db*, Jrd::blb*, RecordNumber, bool, ULONG);
bool DPM_next(Jrd::thread_db*, Jrd::record_param*, USHORT, bool);
bool DPM_next(Jrd::thread_db*, Jrd::record_param*, USHORT, Jrd::FindNextRecordScope);
void DPM_pages(Jrd::thread_db*, SSHORT, int, ULONG, ULONG);
#ifdef SUPERSERVER_V2
SLONG DPM_prefetch_bitmap(Jrd::thread_db*, Jrd::jrd_rel*, Jrd::PageBitmap*, SLONG);

View File

@ -63,6 +63,8 @@
#include "../jrd/vio_proto.h"
#include "../jrd/tra_proto.h"
#include "../jrd/Collation.h"
#include "../common/Task.h"
#include "../jrd/WorkerAttachment.h"
using namespace Jrd;
using namespace Ods;
@ -228,120 +230,314 @@ bool IDX_check_master_types(thread_db* tdbb, index_desc& idx, jrd_rel* partner_r
}
void IDX_create_index(thread_db* tdbb,
jrd_rel* relation,
index_desc* idx,
const TEXT* index_name,
USHORT* index_id,
jrd_tra* transaction,
SelectivityList& selectivity)
namespace Jrd {
class IndexCreateTask : public Task
{
/**************************************
*
* I D X _ c r e a t e _ i n d e x
*
**************************************
*
* Functional description
* Create and populate index.
*
**************************************/
idx_e result = idx_e_ok;
public:
IndexCreateTask(thread_db* tdbb, MemoryPool* pool, IndexCreation* creation) : Task(),
m_pool(pool),
m_tdbb_flags(tdbb->tdbb_flags),
m_creation(creation),
m_sorts(*m_pool),
m_items(*m_pool),
m_stop(false),
m_countPP(0),
m_nextPP(0)
{
m_dbb = tdbb->getDatabase();
Attachment* att = tdbb->getAttachment();
m_exprBlob.clear();
int workers = 1;
if (att->att_parallel_workers > 0)
workers = att->att_parallel_workers;
for (int i = 0; i < workers; i++)
m_items.add(FB_NEW_POOL(*m_pool) Item(this));
m_items[0]->m_ownAttach = false;
m_items[0]->m_attStable = att->getStable();
m_items[0]->m_tra = m_creation->transaction;
if (m_creation)
{
m_countPP = m_creation->relation->getPages(tdbb)->rel_pages->count();
if ((m_creation->index->idx_flags & idx_expressn) && (workers > 1))
MET_lookup_index_expression_blr(tdbb, m_creation->index_name, m_exprBlob);
}
}
virtual ~IndexCreateTask()
{
for (Item** p = m_items.begin(); p < m_items.end(); p++)
delete *p;
}
bool Handler(WorkItem& _item);
bool GetWorkItem(WorkItem** pItem);
bool GetResult(IStatus* status);
int GetMaxWorkers();
class Item : public Task::WorkItem
{
public:
Item(IndexCreateTask* task) : Task::WorkItem(task),
m_inuse(false),
m_ownAttach(true),
m_tra(NULL),
m_sort(NULL),
m_ppSequence(0)
{}
virtual ~Item()
{
if (m_sort)
{
MutexLockGuard guard(getTask()->m_mutex, FB_FUNCTION);
delete m_sort;
m_sort = NULL;
}
if (!m_ownAttach || !m_attStable)
return;
Attachment* att = NULL;
{
AttSyncLockGuard guard(*m_attStable->getSync(), FB_FUNCTION);
att = m_attStable->getHandle();
if (!att)
return;
fb_assert(att->att_use_count > 0);
}
FbLocalStatus status;
if (m_tra)
{
BackgroundContextHolder tdbb(att->att_database, att, &status, FB_FUNCTION);
TRA_commit(tdbb, m_tra, false);
}
WorkerAttachment::releaseAttachment(&status, m_attStable);
}
bool init(thread_db* tdbb)
{
FbStatusVector* status = tdbb->tdbb_status_vector;
Attachment* att = NULL;
if (m_ownAttach && !m_attStable.hasData())
m_attStable = WorkerAttachment::getAttachment(status, getTask()->m_dbb);
if (m_attStable)
att = m_attStable->getHandle();
if (!att)
{
Arg::Gds(isc_bad_db_handle).copyTo(status);
return false;
}
IndexCreation* creation = getTask()->m_creation;
tdbb->setDatabase(att->att_database);
tdbb->setAttachment(att);
if (m_ownAttach && !m_tra)
{
try
{
WorkerContextHolder holder(tdbb, FB_FUNCTION);
m_tra = TRA_start(tdbb, creation->transaction->tra_flags,
creation->transaction->tra_lock_timeout);
}
catch (const Exception& ex)
{
ex.stuffException(tdbb->tdbb_status_vector);
return false;
}
}
tdbb->setTransaction(m_tra);
if (!m_sort)
{
m_idx = *creation->index; // copy
if (m_ownAttach)
{
m_idx.idx_expression = NULL;
m_idx.idx_expression_statement = NULL;
m_idx.idx_foreign_indexes = NULL;
m_idx.idx_foreign_primaries = NULL;
m_idx.idx_foreign_relations = NULL;
}
FPTR_REJECT_DUP_CALLBACK callback = NULL;
void* callback_arg = NULL;
if (m_idx.idx_flags & idx_unique)
{
callback = duplicate_key;
callback_arg = creation;
}
MutexLockGuard guard(getTask()->m_mutex, FB_FUNCTION);
m_sort = FB_NEW_POOL(getTask()->m_sorts.getPool())
Sort(att->att_database, &getTask()->m_sorts,
creation->key_length + sizeof(index_sort_record),
2, 1, creation->key_desc, callback, callback_arg);
creation->sort->addPartition(m_sort);
}
return true;
}
IndexCreateTask* getTask() const
{
return reinterpret_cast<IndexCreateTask*> (m_task);
}
bool m_inuse;
bool m_ownAttach;
RefPtr<StableAttachmentPart> m_attStable;
jrd_tra* m_tra;
index_desc m_idx;
Sort* m_sort;
ULONG m_ppSequence;
};
private:
void setError(IStatus* status, bool stopTask)
{
const bool copyStatus = (m_status.isSuccess() && status && status->getState() == IStatus::STATE_ERRORS);
if (!copyStatus && (!stopTask || m_stop))
return;
MutexLockGuard guard(m_mutex, FB_FUNCTION);
if (m_status.isSuccess() && copyStatus)
m_status.save(status);
if (stopTask)
m_stop = true;
}
MemoryPool* m_pool;
Database* m_dbb;
const ULONG m_tdbb_flags;
IndexCreation* m_creation;
SortOwner m_sorts;
bid m_exprBlob;
Mutex m_mutex;
HalfStaticArray<Item*, 8> m_items;
StatusHolder m_status;
volatile bool m_stop;
ULONG m_countPP;
ULONG m_nextPP;
};
bool IndexCreateTask::Handler(WorkItem& _item)
{
Item* item = reinterpret_cast<Item*>(&_item);
ThreadContextHolder tdbb(NULL);
tdbb->tdbb_flags = m_tdbb_flags;
if (!item->init(tdbb))
{
setError(tdbb->tdbb_status_vector, true);
return false;
}
WorkerContextHolder holder(tdbb, FB_FUNCTION);
SET_TDBB(tdbb);
Database* dbb = tdbb->getDatabase();
Jrd::Attachment* attachment = tdbb->getAttachment();
Attachment* attachment = tdbb->getAttachment();
jrd_rel* relation = MET_relation(tdbb, m_creation->relation->rel_id);
if (!(relation->rel_flags & REL_scanned))
MET_scan_relation(tdbb, relation);
if (relation->rel_file)
{
ERR_post(Arg::Gds(isc_no_meta_update) <<
Arg::Gds(isc_extfile_uns_op) << Arg::Str(relation->rel_name));
}
else if (relation->isVirtual())
{
ERR_post(Arg::Gds(isc_no_meta_update) <<
Arg::Gds(isc_wish_list));
}
get_root_page(tdbb, relation);
fb_assert(transaction);
index_desc* idx = &item->m_idx;
jrd_tra* transaction = item->m_tra ? item->m_tra : m_creation->transaction;
Sort* scb = item->m_sort;
idx_e result = idx_e_ok;
RecordStack stack;
record_param primary, secondary;
secondary.rpb_relation = relation;
primary.rpb_relation = relation;
primary.rpb_relation = relation;
primary.rpb_number.setValue(BOF_NUMBER);
//primary.getWindow(tdbb).win_flags = secondary.getWindow(tdbb).win_flags = 0; redundant
const bool isDescending = (idx->idx_flags & idx_descending);
const bool isPrimary = (idx->idx_flags & idx_primary);
const bool isForeign = (idx->idx_flags & idx_foreign);
IndexErrorContext context(relation, idx, m_creation->index_name);
// hvlad: in ODS11 empty string and NULL values can have the same binary
// representation in index keys. BTR can distinguish it by the key_length
// but SORT module currently don't take it into account. Therefore add to
// the index key one byte prefix with 0 for NULL value and 1 for not-NULL
// value to produce right sorting.
// BTR\fast_load will remove this one byte prefix from the index key.
// Note that this is necessary only for single-segment ascending indexes
// and only for ODS11 and higher.
try {
const int nullIndLen = !isDescending && (idx->idx_count == 1) ? 1 : 0;
const USHORT key_length = ROUNDUP(BTR_key_length(tdbb, relation, idx) + nullIndLen, sizeof(SINT64));
if (key_length >= dbb->getMaxIndexKeyLength())
// If scan is finished, do final sort pass over own sort
if (item->m_ppSequence == m_countPP)
{
ERR_post(Arg::Gds(isc_no_meta_update) <<
Arg::Gds(isc_keytoobig) << Arg::Str(index_name));
//fb_assert((scb->scb_flags & scb_sorted) == 0);
if (item->m_ownAttach && idx->idx_expression_statement)
{
idx->idx_expression_statement->release(tdbb);
idx->idx_expression_statement = NULL;
}
if (!m_stop && m_creation->duplicates.value() == 0)
scb->sort(tdbb);
if (!m_stop && m_creation->duplicates.value() > 0)
{
AutoPtr<Record> error_record;
primary.rpb_record = NULL;
fb_assert(m_creation->dup_recno >= 0);
primary.rpb_number.setValue(m_creation->dup_recno);
if (DPM_get(tdbb, &primary, LCK_read))
{
if (primary.rpb_flags & rpb_deleted)
CCH_RELEASE(tdbb, &primary.getWindow(tdbb));
else
{
VIO_data(tdbb, &primary, dbb->dbb_permanent);
error_record = primary.rpb_record;
}
}
context.raise(tdbb, idx_e_duplicate, error_record);
}
return true;
}
IndexCreation creation;
creation.index = idx;
creation.relation = relation;
creation.transaction = transaction;
creation.key_length = key_length;
creation.dup_recno = -1;
creation.duplicates = 0;
BTR_reserve_slot(tdbb, creation);
if (index_id)
*index_id = idx->idx_id;
RecordStack stack;
const UCHAR pad = isDescending ? -1 : 0;
sort_key_def key_desc[2];
// Key sort description
key_desc[0].setSkdLength(SKD_bytes, key_length);
key_desc[0].skd_flags = SKD_ascending;
key_desc[0].setSkdOffset();
key_desc[0].skd_vary_offset = 0;
// RecordNumber sort description
key_desc[1].setSkdLength(SKD_int64, sizeof(RecordNumber));
key_desc[1].skd_flags = SKD_ascending;
key_desc[1].setSkdOffset(key_desc);
key_desc[1].skd_vary_offset = 0;
FPTR_REJECT_DUP_CALLBACK callback = (idx->idx_flags & idx_unique) ? duplicate_key : NULL;
void* callback_arg = (idx->idx_flags & idx_unique) ? &creation : NULL;
Sort* const scb = FB_NEW_POOL(transaction->tra_sorts.getPool())
Sort(dbb, &transaction->tra_sorts, key_length + sizeof(index_sort_record),
2, 1, key_desc, callback, callback_arg);
creation.sort = scb;
jrd_rel* partner_relation = NULL;
jrd_rel* partner_relation = 0;
USHORT partner_index_id = 0;
if (isForeign)
if (idx->idx_flags & idx_foreign)
{
if (!MET_lookup_partner(tdbb, relation, idx, index_name))
BUGCHECK(173); // msg 173 referenced index description not found
// if (!MET_lookup_partner(tdbb, relation, idx, m_creation->index_name)) {
// BUGCHECK(173); // msg 173 referenced index description not found
// }
partner_relation = MET_relation(tdbb, idx->idx_primary_relation);
partner_index_id = idx->idx_primary_index;
}
if ((idx->idx_flags & idx_expressn) && (idx->idx_expression == NULL))
{
fb_assert(!m_exprBlob.isEmpty());
CompilerScratch* csb = NULL;
Jrd::ContextPoolHolder context(tdbb, attachment->createPool());
idx->idx_expression = static_cast<ValueExprNode*> (MET_parse_blob(tdbb, relation, &m_exprBlob,
&csb, &idx->idx_expression_statement, false, false));
delete csb;
}
// Checkout a garbage collect record block for fetching data.
AutoGCRecord gc_record(VIO_gc_record(tdbb, relation));
@ -357,12 +553,29 @@ void IDX_create_index(thread_db* tdbb,
}
}
IndexErrorContext context(relation, idx, index_name);
const bool isDescending = (idx->idx_flags & idx_descending);
const bool isPrimary = (idx->idx_flags & idx_primary);
const bool isForeign = (idx->idx_flags & idx_foreign);
const UCHAR pad = isDescending ? -1 : 0;
bool key_is_null = false;
primary.rpb_number.compose(dbb->dbb_max_records, dbb->dbb_dp_per_pp, 0, 0, item->m_ppSequence);
primary.rpb_number.decrement();
RecordNumber lastRecNo;
lastRecNo.compose(dbb->dbb_max_records, dbb->dbb_dp_per_pp, 0, 0, item->m_ppSequence + 1);
lastRecNo.decrement();
// Loop thru the relation computing index keys. If there are old versions, find them, too.
temporary_key key;
while (DPM_next(tdbb, &primary, LCK_read, false))
while (DPM_next(tdbb, &primary, LCK_read, DPM_next_pointer_page))
{
if (primary.rpb_number >= lastRecNo)
{
CCH_RELEASE(tdbb, &primary.getWindow(tdbb));
break;
}
if (!VIO_garbage_collect(tdbb, &primary, transaction))
continue;
@ -389,7 +602,7 @@ void IDX_create_index(thread_db* tdbb,
secondary.rpb_line = primary.rpb_b_line;
secondary.rpb_prior = primary.rpb_prior;
while (secondary.rpb_page)
while (!m_stop && secondary.rpb_page)
{
if (!DPM_fetch(tdbb, &secondary, LCK_read))
break; // must be garbage collected
@ -401,7 +614,7 @@ void IDX_create_index(thread_db* tdbb,
secondary.rpb_line = secondary.rpb_b_line;
}
while (stack.hasData())
while (!m_stop && stack.hasData())
{
Record* record = stack.pop();
@ -444,7 +657,7 @@ void IDX_create_index(thread_db* tdbb,
context.raise(tdbb, result, record);
}
if (key.key_length > key_length)
if (key.key_length > m_creation->key_length)
{
do {
if (record != gc_record)
@ -462,7 +675,7 @@ void IDX_create_index(thread_db* tdbb,
// try to catch duplicates early
if (creation.duplicates > 0)
if (m_creation->duplicates.value() > 0)
{
do {
if (record != gc_record)
@ -472,7 +685,7 @@ void IDX_create_index(thread_db* tdbb,
break;
}
if (nullIndLen)
if (m_creation->nullIndLen)
*p++ = (key.key_length == 0) ? 0 : 1;
if (key.key_length > 0)
@ -481,7 +694,7 @@ void IDX_create_index(thread_db* tdbb,
p += key.key_length;
}
int l = int(key_length) - nullIndLen - key.key_length; // must be signed
int l = int(m_creation->key_length) - m_creation->nullIndLen - key.key_length; // must be signed
if (l > 0)
{
@ -499,7 +712,10 @@ void IDX_create_index(thread_db* tdbb,
delete record;
}
if (creation.duplicates > 0)
if (m_stop)
break;
if (m_creation->duplicates.value() > 0)
break;
JRD_reschedule(tdbb);
@ -509,18 +725,210 @@ void IDX_create_index(thread_db* tdbb,
if (primary.getWindow(tdbb).win_flags & WIN_large_scan)
--relation->rel_scan_count;
}
catch (const Exception& ex)
{
ex.stuffException(tdbb->tdbb_status_vector);
setError(tdbb->tdbb_status_vector, true);
if (!creation.duplicates)
scb->sort(tdbb);
delete scb;
item->m_sort = NULL;
return false;
}
// ASF: We have a callback accessing "creation", so don't join above and below if's.
return true;
}
if (!creation.duplicates)
bool IndexCreateTask::GetWorkItem(WorkItem** pItem)
{
Item* item = reinterpret_cast<Item*> (*pItem);
MutexLockGuard guard(m_mutex, FB_FUNCTION);
if (m_stop)
return false;
if (item == NULL)
{
for (Item** p = m_items.begin(); p < m_items.end(); p++)
if (!(*p)->m_inuse)
{
(*p)->m_inuse = true;
*pItem = item = *p;
break;
}
}
if (!item)
return false;
item->m_inuse = (m_nextPP < m_countPP) ||
(item->m_sort && item->m_sort->isSorted()) == 0;
if (item->m_inuse)
{
item->m_ppSequence = m_nextPP;
if (m_nextPP < m_countPP)
m_nextPP += 1;
}
return item->m_inuse;
}
bool IndexCreateTask::GetResult(IStatus* status)
{
if (status)
{
status->init();
status->setErrors(m_status.getErrors());
}
return m_status.isSuccess();
}
int IndexCreateTask::GetMaxWorkers()
{
const int parWorkers = m_items.getCount();
if (parWorkers == 1 || m_countPP == 0)
return 1;
fb_assert(m_creation != NULL);
if (!m_creation || m_creation->relation->isTemporary())
return 1;
return MIN(parWorkers, m_countPP);
}
}; // namespace Jrd
void IDX_create_index(thread_db* tdbb,
jrd_rel* relation,
index_desc* idx,
const TEXT* index_name,
USHORT* index_id,
jrd_tra* transaction,
SelectivityList& selectivity)
{
/**************************************
*
* I D X _ c r e a t e _ i n d e x
*
**************************************
*
* Functional description
* Create and populate index.
*
**************************************/
idx_e result = idx_e_ok;
SET_TDBB(tdbb);
Database* dbb = tdbb->getDatabase();
Jrd::Attachment* attachment = tdbb->getAttachment();
if (relation->rel_file)
{
ERR_post(Arg::Gds(isc_no_meta_update) <<
Arg::Gds(isc_extfile_uns_op) << Arg::Str(relation->rel_name));
}
else if (relation->isVirtual())
{
ERR_post(Arg::Gds(isc_no_meta_update) <<
Arg::Gds(isc_wish_list));
}
get_root_page(tdbb, relation);
fb_assert(transaction);
const bool isDescending = (idx->idx_flags & idx_descending);
const bool isPrimary = (idx->idx_flags & idx_primary);
const bool isForeign = (idx->idx_flags & idx_foreign);
// hvlad: in ODS11 empty string and NULL values can have the same binary
// representation in index keys. BTR can distinguish it by the key_length
// but SORT module currently don't take it into account. Therefore add to
// the index key one byte prefix with 0 for NULL value and 1 for not-NULL
// value to produce right sorting.
// BTR\fast_load will remove this one byte prefix from the index key.
// Note that this is necessary only for single-segment ascending indexes
// and only for ODS11 and higher.
const int nullIndLen = !isDescending && (idx->idx_count == 1) ? 1 : 0;
const USHORT key_length = ROUNDUP(BTR_key_length(tdbb, relation, idx) + nullIndLen, sizeof(SINT64));
if (key_length >= dbb->getMaxIndexKeyLength())
{
ERR_post(Arg::Gds(isc_no_meta_update) <<
Arg::Gds(isc_keytoobig) << Arg::Str(index_name));
}
if (isForeign)
{
if (!MET_lookup_partner(tdbb, relation, idx, index_name)) {
BUGCHECK(173); // msg 173 referenced index description not found
}
}
IndexCreation creation;
creation.index = idx;
creation.index_name = index_name;
creation.relation = relation;
creation.transaction = transaction;
creation.sort = NULL;
creation.key_length = key_length;
creation.nullIndLen = nullIndLen;
creation.dup_recno = -1;
creation.duplicates.setValue(0);
BTR_reserve_slot(tdbb, creation);
if (index_id)
*index_id = idx->idx_id;
sort_key_def key_desc[2];
// Key sort description
key_desc[0].setSkdLength(SKD_bytes, key_length);
key_desc[0].skd_flags = SKD_ascending;
key_desc[0].setSkdOffset();
key_desc[0].skd_vary_offset = 0;
// RecordNumber sort description
key_desc[1].setSkdLength(SKD_int64, sizeof(RecordNumber));
key_desc[1].skd_flags = SKD_ascending;
key_desc[1].setSkdOffset(key_desc);
key_desc[1].skd_vary_offset = 0;
creation.key_desc = key_desc;
PartitionedSort sort(dbb, &transaction->tra_sorts);
creation.sort = &sort;
Coordinator coord(dbb->dbb_permanent);
IndexCreateTask task(tdbb, dbb->dbb_permanent, &creation);
{
EngineCheckout cout(tdbb, FB_FUNCTION);
FbLocalStatus local_status;
fb_utils::init_status(&local_status);
coord.RunSync(&task);
if (!task.GetResult(&local_status))
local_status.raise();
}
sort.buidMergeTree();
if (creation.duplicates.value() == 0)
BTR_create(tdbb, creation, selectivity);
if (creation.duplicates > 0)
if (creation.duplicates.value() > 0)
{
AutoPtr<Record> error_record;
record_param primary;
primary.rpb_relation = relation;
primary.rpb_record = NULL;
fb_assert(creation.dup_recno >= 0);
primary.rpb_number.setValue(creation.dup_recno);
@ -537,6 +945,7 @@ void IDX_create_index(thread_db* tdbb,
}
IndexErrorContext context(relation, idx, index_name);
context.raise(tdbb, idx_e_duplicate, error_record);
}
@ -1519,7 +1928,7 @@ static bool duplicate_key(const UCHAR* record1, const UCHAR* record2, void* ifl_
if (!(rec1->isr_flags & (ISR_secondary | ISR_null)) &&
!(rec2->isr_flags & (ISR_secondary | ISR_null)))
{
if (!ifl_data->duplicates++)
if (ifl_data->duplicates.exchangeAdd(1) == 0)
ifl_data->dup_recno = rec2->isr_record_number;
}

View File

@ -84,6 +84,7 @@ enum irq_type_t
irq_c_exp_index, // create expression index
irq_l_exp_index, // lookup expression index
irq_l_exp_index_blr, // lookup expression index BLR
irq_l_rel_id, // lookup relation id
irq_l_procedure, // lookup procedure name

View File

@ -112,6 +112,7 @@
#include "../jrd/Mapping.h"
#include "../jrd/Database.h"
#include "../jrd/WorkerAttachment.h"
#include "../common/config/config.h"
#include "../common/config/dir_list.h"
@ -1082,6 +1083,8 @@ namespace Jrd
bool dpb_reset_icu;
bool dpb_map_attach;
ULONG dpb_remote_flags;
SSHORT dpb_parallel_workers;
bool dpb_worker_attach;
ReplicaMode dpb_replica_mode;
bool dpb_set_db_replica;
bool dpb_clear_map;
@ -2147,6 +2150,11 @@ JAttachment* JProvider::internalAttach(CheckStatusWrapper* user_status, const ch
}
}
if (options.dpb_parallel_workers)
{
attachment->att_parallel_workers = options.dpb_parallel_workers;
}
if (options.dpb_set_db_readonly)
{
validateAccess(tdbb, attachment, CHANGE_HEADER_SETTINGS);
@ -2241,6 +2249,11 @@ JAttachment* JProvider::internalAttach(CheckStatusWrapper* user_status, const ch
}
}
if (options.dpb_worker_attach)
attachment->att_flags |= ATT_worker;
else
WorkerAttachment::incUserAtts(dbb->dbb_filename);
jAtt->getStable()->manualUnlock(attachment->att_flags);
return jAtt;
@ -3117,6 +3130,11 @@ JAttachment* JProvider::createDatabase(CheckStatusWrapper* user_status, const ch
CCH_init2(tdbb);
VIO_init(tdbb);
if (options.dpb_parallel_workers)
{
attachment->att_parallel_workers = options.dpb_parallel_workers;
}
if (options.dpb_set_db_readonly)
{
if (!CCH_exclusive(tdbb, LCK_EX, WAIT_PERIOD, &dbbGuard))
@ -3186,6 +3204,8 @@ JAttachment* JProvider::createDatabase(CheckStatusWrapper* user_status, const ch
attachment->att_trace_manager->event_attach(&conn, true, ITracePlugin::RESULT_SUCCESS);
}
WorkerAttachment::incUserAtts(dbb->dbb_filename);
jAtt->getStable()->manualUnlock(attachment->att_flags);
return jAtt;
@ -3436,6 +3456,9 @@ void JAttachment::internalDropDatabase(CheckStatusWrapper* user_status)
Arg::Gds(isc_obj_in_use) << Arg::Str(file_name));
}
if (!(attachment->att_flags & ATT_worker))
WorkerAttachment::decUserAtts(dbb->dbb_filename);
// Lock header page before taking database lock
header = (Ods::header_page*) CCH_FETCH(tdbb, &window, LCK_write, pag_header);
@ -4571,6 +4594,7 @@ void JProvider::shutdown(CheckStatusWrapper* status, unsigned int timeout, const
ThreadContextHolder tdbb;
WorkerAttachment::shutdown();
EDS::Manager::shutdown();
ULONG attach_count, database_count, svc_count;
@ -6817,6 +6841,7 @@ void DatabaseOptions::get(const UCHAR* dpb, USHORT dpb_length, bool& invalid_cli
dpb_overwrite = false;
dpb_sql_dialect = 99;
invalid_client_SQL_dialect = false;
dpb_parallel_workers = Config::getParallelWorkers();
if (dpb_length == 0)
return;
@ -7199,6 +7224,23 @@ void DatabaseOptions::get(const UCHAR* dpb, USHORT dpb_length, bool& invalid_cli
dpb_clear_map = rdr.getBoolean();
break;
case isc_dpb_parallel_workers:
dpb_parallel_workers = (SSHORT) rdr.getInt();
if (dpb_parallel_workers > Config::getMaxParallelWorkers() ||
dpb_parallel_workers < 0)
{
string str;
str.printf("Wrong parallel workers value %i, valid range are from 1 to %i",
dpb_parallel_workers, Config::getMaxParallelWorkers());
ERR_post(Arg::Gds(isc_bad_dpb_content) << Arg::Gds(isc_random) << Arg::Str(str));
}
break;
case isc_dpb_worker_attach:
dpb_worker_attach = true;
break;
default:
break;
}
@ -7206,6 +7248,12 @@ void DatabaseOptions::get(const UCHAR* dpb, USHORT dpb_length, bool& invalid_cli
if (! rdr.isEof())
ERR_post(Arg::Gds(isc_bad_dpb_form));
if (dpb_worker_attach)
{
dpb_parallel_workers = 1;
dpb_no_db_triggers = true;
}
}
@ -7866,6 +7914,8 @@ bool JRD_shutdown_database(Database* dbb, const unsigned flags)
fb_assert(!dbb->locked());
WorkerAttachment::shutdownDbb(dbb);
try
{
#ifdef SUPERSERVER_V2
@ -8334,6 +8384,9 @@ static void purge_attachment(thread_db* tdbb, StableAttachmentPart* sAtt, unsign
if (attachment->att_flags & ATT_overwrite_check)
shutdownFlags |= SHUT_DBB_OVERWRITE_CHECK;
if (!(attachment->att_flags & ATT_worker))
WorkerAttachment::decUserAtts(dbb->dbb_filename);
// Unlink attachment from database
release_attachment(tdbb, attachment);

View File

@ -2655,6 +2655,27 @@ void MET_lookup_index_expression(thread_db* tdbb, jrd_rel* relation, index_desc*
}
bool MET_lookup_index_expression_blr(thread_db* tdbb, const MetaName& index_name, bid& blob_id)
{
SET_TDBB(tdbb);
Attachment* attachment = tdbb->getAttachment();
bool found = false;
AutoCacheRequest request(tdbb, irq_l_exp_index_blr, IRQ_REQUESTS);
FOR(REQUEST_HANDLE request)
IDX IN RDB$INDICES WITH
IDX.RDB$INDEX_NAME EQ index_name.c_str()
{
found = !IDX.RDB$EXPRESSION_BLR.NULL;
blob_id = IDX.RDB$EXPRESSION_BLR;
}
END_FOR;
return found;
}
bool MET_lookup_partner(thread_db* tdbb, jrd_rel* relation, index_desc* idx, const TEXT* index_name)
{
/**************************************

View File

@ -108,6 +108,7 @@ bool MET_lookup_generator_id(Jrd::thread_db*, SLONG, Jrd::MetaName&, bool* sysG
void MET_update_generator_increment(Jrd::thread_db* tdbb, SLONG gen_id, SLONG step);
void MET_lookup_index(Jrd::thread_db*, Jrd::MetaName&, const Jrd::MetaName&, USHORT);
void MET_lookup_index_expression(Jrd::thread_db*, Jrd::jrd_rel*, Jrd::index_desc*);
bool MET_lookup_index_expression_blr(Jrd::thread_db* tdbb, const Jrd::MetaName& index_name, Jrd::bid& blob_id);
SLONG MET_lookup_index_name(Jrd::thread_db*, const Jrd::MetaName&, SLONG*, Jrd::IndexStatus* status);
bool MET_lookup_partner(Jrd::thread_db*, Jrd::jrd_rel*, struct Jrd::index_desc*, const TEXT*);
Jrd::jrd_prc* MET_lookup_procedure(Jrd::thread_db*, const Jrd::QualifiedName&, bool);

View File

@ -146,7 +146,7 @@ bool FullTableScan::getRecord(thread_db* tdbb) const
return false;
}
if (VIO_next_record(tdbb, rpb, request->req_transaction, request->req_pool, false))
if (VIO_next_record(tdbb, rpb, request->req_transaction, request->req_pool, DPM_next_all))
{
if (impure->irsb_upper.isValid() && rpb->rpb_number > impure->irsb_upper)
{

View File

@ -1092,7 +1092,7 @@ bool Applier::lookupRecord(thread_db* tdbb,
rpb.rpb_relation = relation;
rpb.rpb_number.setValue(BOF_NUMBER);
while (VIO_next_record(tdbb, &rpb, transaction, m_request->req_pool, false))
while (VIO_next_record(tdbb, &rpb, transaction, m_request->req_pool, DPM_next_all))
{
const auto seq_record = rpb.rpb_record;
fb_assert(seq_record);

View File

@ -315,27 +315,7 @@ void Sort::get(thread_db* tdbb, ULONG** record_address)
try
{
// If there weren't any runs, everything fit in memory. Just return stuff.
if (!m_merge)
{
while (true)
{
if (m_records == 0)
{
record = NULL;
break;
}
m_records--;
if ( (record = *m_next_pointer++) )
break;
}
}
else
{
record = getMerge(m_merge);
}
record = getRecord();
*record_address = (ULONG*) record;
if (record)
@ -1376,6 +1356,33 @@ sort_record* Sort::getMerge(merge_control* merge)
}
sort_record* Sort::getRecord()
{
sort_record* record = NULL;
// If there weren't any runs, everything fit in memory. Just return stuff.
if (!m_merge)
{
while (true)
{
if (m_records == 0)
{
record = NULL;
break;
}
m_records--;
if ((record = *m_next_pointer++))
break;
}
}
else
record = getMerge(m_merge);
return record;
}
void Sort::init()
{
/**************************************
@ -2162,3 +2169,236 @@ void Sort::sortRunsBySeek(int n)
}
run->run_next = tail;
}
/// class PartitionedSort
PartitionedSort::PartitionedSort(Database* dbb, SortOwner* owner) :
m_owner(owner),
m_parts(owner->getPool()),
m_nodes(owner->getPool()),
m_merge(NULL)
{
}
PartitionedSort::~PartitionedSort()
{
// for (ULONG p = 0; p < m_parts.getCount(); p++)
// delete m_parts[p].srt_sort;
}
void PartitionedSort::buidMergeTree()
{
ULONG count = m_parts.getCount();
if (count <= 0)
return;
MemoryPool& pool = m_owner->getPool();
HalfStaticArray<run_merge_hdr*, 8> streams(pool);
run_merge_hdr** m1 = streams.getBuffer(count);
for (sort_control* sort = m_parts.begin(); sort < m_parts.end(); sort++)
*m1++ = &sort->srt_header;
merge_control* node = m_nodes.getBuffer(count - 1);
while (count > 1)
{
run_merge_hdr** m2 = m1 = streams.begin();
// "m1" is used to sequence through the runs being merged,
// while "m2" points at the new merged run
while (count >= 2)
{
m_merge = node++;
m_merge->mrg_header.rmh_type = RMH_TYPE_MRG;
// garbage watch
fb_assert(((*m1)->rmh_type == RMH_TYPE_MRG) || ((*m1)->rmh_type == RMH_TYPE_SORT));
(*m1)->rmh_parent = m_merge;
m_merge->mrg_stream_a = *m1++;
// garbage watch
fb_assert(((*m1)->rmh_type == RMH_TYPE_MRG) || ((*m1)->rmh_type == RMH_TYPE_SORT));
(*m1)->rmh_parent = m_merge;
m_merge->mrg_stream_b = *m1++;
m_merge->mrg_record_a = NULL;
m_merge->mrg_record_b = NULL;
*m2++ = (run_merge_hdr*)m_merge;
count -= 2;
}
if (count)
*m2++ = *m1++;
count = m2 - streams.begin();
}
if (m_merge)
m_merge->mrg_header.rmh_parent = NULL;
}
void PartitionedSort::get(thread_db* tdbb, ULONG** record_address)
{
sort_record* record = NULL;
if (!m_merge)
record = m_parts[0].srt_sort->getRecord();
else
record = getMerge();
*record_address = (ULONG*)record;
if (record)
m_parts[0].srt_sort->diddleKey((UCHAR*)record->sort_record_key, false, true);
}
sort_record* PartitionedSort::getMerge()
{
Sort* aSort = m_parts[0].srt_sort;
merge_control* merge = m_merge;
sort_record* record = NULL;
bool eof = false;
while (merge)
{
// If node is a run_control, get the next record (or not) and back to parent
if (merge->mrg_header.rmh_type == RMH_TYPE_SORT)
{
sort_control* sort = (sort_control*)merge;
merge = sort->srt_header.rmh_parent;
// check for end-of-file condition in either direction
record = sort->srt_sort->getRecord();
if (!record)
{
record = (sort_record*)-1;
eof = true;
continue;
}
eof = false;
continue;
}
// If've we got a record, somebody asked for it. Find out who.
if (record)
{
if (merge->mrg_stream_a && !merge->mrg_record_a)
{
if (eof)
merge->mrg_stream_a = NULL;
else
merge->mrg_record_a = record;
}
else if (eof)
merge->mrg_stream_b = NULL;
else
merge->mrg_record_b = record;
}
// If either streams need a record and is still active, loop back to pick
// up the record. If either stream is dry, return the record of the other.
// If both are dry, indicate eof for this stream.
record = NULL;
eof = false;
if (!merge->mrg_record_a && merge->mrg_stream_a)
{
merge = (merge_control*)merge->mrg_stream_a;
continue;
}
if (!merge->mrg_record_b)
{
if (merge->mrg_stream_b) {
merge = (merge_control*)merge->mrg_stream_b;
}
else if ((record = merge->mrg_record_a))
{
merge->mrg_record_a = NULL;
merge = merge->mrg_header.rmh_parent;
}
else
{
eof = true;
record = (sort_record*)-1;
merge = merge->mrg_header.rmh_parent;
}
continue;
}
if (!merge->mrg_record_a)
{
record = merge->mrg_record_b;
merge->mrg_record_b = NULL;
merge = merge->mrg_header.rmh_parent;
continue;
}
// We have prospective records from each of the sub-streams. Compare them.
// If equal, offer each to user routine for possible sacrifice.
SORTP *p = merge->mrg_record_a->sort_record_key;
SORTP *q = merge->mrg_record_b->sort_record_key;
//l = m_key_length;
ULONG l = aSort->m_unique_length;
DO_32_COMPARE(p, q, l);
if (l == 0 && aSort->m_dup_callback)
{
UCHAR* rec_a = (UCHAR*)merge->mrg_record_a;
UCHAR* rec_b = (UCHAR*)merge->mrg_record_a;
aSort->diddleKey(rec_a, false, true);
aSort->diddleKey(rec_b, false, true);
if ((*aSort->m_dup_callback) ((const UCHAR*)merge->mrg_record_a,
(const UCHAR*)merge->mrg_record_b,
aSort->m_dup_callback_arg))
{
merge->mrg_record_a = NULL;
aSort->diddleKey(rec_b, true, true);
continue;
}
aSort->diddleKey(rec_a, true, true);
aSort->diddleKey(rec_b, true, true);
}
if (l == 0)
{
l = aSort->m_key_length - aSort->m_unique_length;
if (l != 0)
DO_32_COMPARE(p, q, l);
}
if (p[-1] < q[-1])
{
record = merge->mrg_record_a;
merge->mrg_record_a = NULL;
}
else
{
record = merge->mrg_record_b;
merge->mrg_record_b = NULL;
}
merge = merge->mrg_header.rmh_parent;
}
// Merge pointer is null; we're done. Return either the most
// recent record, or end of file, as appropriate.
return eof ? NULL : record;
}

View File

@ -33,6 +33,7 @@ namespace Jrd {
// Forward declaration
class Attachment;
class Sort;
class SortOwner;
struct merge_control;
@ -221,6 +222,7 @@ struct run_merge_hdr
const int RMH_TYPE_RUN = 0;
const int RMH_TYPE_MRG = 1;
const int RMH_TYPE_SORT = 2;
// Run control block
@ -253,13 +255,26 @@ struct merge_control
run_merge_hdr* mrg_stream_b;
};
// Sort control block, for partitioned sort
struct sort_control
{
run_merge_hdr srt_header;
Sort* srt_sort;
};
// Sort class
typedef bool (*FPTR_REJECT_DUP_CALLBACK)(const UCHAR*, const UCHAR*, void*);
// flags as set in m_flags
const int scb_sorted = 1; // stream has been sorted
class Sort
{
friend class PartitionedSort;
public:
Sort(Database*, SortOwner*,
ULONG, FB_SIZE_T, FB_SIZE_T, const sort_key_def*,
@ -270,6 +285,11 @@ public:
void put(Jrd::thread_db*, ULONG**);
void sort(Jrd::thread_db*);
bool isSorted() const
{
return m_flags & scb_sorted;
}
static FB_UINT64 readBlock(TempSpace* space, FB_UINT64 seek, UCHAR* address, ULONG length)
{
const size_t bytes = space->read(seek, address, length);
@ -290,6 +310,7 @@ private:
void diddleKey(UCHAR*, bool, bool);
sort_record* getMerge(merge_control*);
sort_record* getRecord();
ULONG allocate(ULONG, ULONG, bool);
void init();
void mergeRuns(USHORT);
@ -333,9 +354,36 @@ private:
Firebird::Array<sort_key_def> m_description;
};
// flags as set in m_flags
const int scb_sorted = 1; // stream has been sorted
class PartitionedSort
{
public:
PartitionedSort(Database*, SortOwner*);
~PartitionedSort();
void get(Jrd::thread_db*, ULONG**);
void addPartition(Sort* sort)
{
sort_control item;
item.srt_header.rmh_type = RMH_TYPE_SORT;
item.srt_header.rmh_parent = NULL;
item.srt_sort = sort;
m_parts.add(item);
}
void buidMergeTree();
private:
sort_record* getMerge();
SortOwner* m_owner;
Firebird::HalfStaticArray<sort_control, 8> m_parts;
Firebird::HalfStaticArray<merge_control, 8> m_nodes; // nodes of merge tree
merge_control* m_merge; // root of merge tree
};
class SortOwner
{

View File

@ -2963,6 +2963,7 @@ bool Service::process_switches(ClumpletReader& spb, string& switches)
get_action_svc_data(spb, burp_database, bigint);
break;
case isc_spb_bkp_factor:
case isc_spb_bkp_parallel_workers:
case isc_spb_res_buffers:
case isc_spb_res_page_size:
case isc_spb_verbint:
@ -3046,6 +3047,7 @@ bool Service::process_switches(ClumpletReader& spb, string& switches)
case isc_spb_rpr_commit_trans:
case isc_spb_rpr_rollback_trans:
case isc_spb_rpr_recover_two_phase:
case isc_spb_rpr_par_workers:
if (!get_action_svc_parameter(spb.getClumpTag(), alice_in_sw_table, switches))
{
return false;

View File

@ -4163,13 +4163,12 @@ TraceSweepEvent::TraceSweepEvent(thread_db* tdbb)
TraceManager* trace_mgr = att->att_trace_manager;
m_start_clock = fb_utils::query_performance_counter();
m_need_trace = trace_mgr->needs(ITraceFactory::TRACE_EVENT_SWEEP);
if (!m_need_trace)
return;
m_start_clock = fb_utils::query_performance_counter();
TraceConnectionImpl conn(att);
trace_mgr->event_sweep(&conn, &m_sweep_info, ITracePlugin::SWEEP_STATE_STARTED);
}
@ -4238,12 +4237,19 @@ void TraceSweepEvent::report(ntrace_process_state_t state)
{
Attachment* att = m_tdbb->getAttachment();
const SINT64 finiTime = fb_utils::query_performance_counter() - m_start_clock;
if (state == ITracePlugin::SWEEP_STATE_FINISHED)
{
const SINT64 timeMs = finiTime * 1000 / fb_utils::query_performance_frequency();
gds__log("Sweep is finished\n"
"\tDatabase \"%s\" \n"
"\t%i workers, time %" SLONGFORMAT ".%03d sec \n"
"\tOIT %" SQUADFORMAT", OAT %" SQUADFORMAT", OST %" SQUADFORMAT", Next %" SQUADFORMAT,
att->att_filename.c_str(),
att->att_parallel_workers,
(int) timeMs / 1000, (unsigned int) timeMs % 1000,
m_sweep_info.getOIT(),
m_sweep_info.getOAT(),
m_sweep_info.getOST(),
@ -4264,9 +4270,7 @@ void TraceSweepEvent::report(ntrace_process_state_t state)
jrd_tra* tran = m_tdbb->getTransaction();
TraceRuntimeStats stats(att, &m_base_stats, &att->att_stats,
fb_utils::query_performance_counter() - m_start_clock,
0);
TraceRuntimeStats stats(att, &m_base_stats, &att->att_stats, finiTime, 0);
m_sweep_info.setPerf(stats.getPerf());
trace_mgr->event_sweep(&conn, &m_sweep_info, state);

View File

@ -89,6 +89,8 @@
#include "../jrd/GarbageCollector.h"
#include "../jrd/trace/TraceManager.h"
#include "../jrd/trace/TraceJrdHelpers.h"
#include "../common/Task.h"
#include "../jrd/WorkerAttachment.h"
using namespace Jrd;
using namespace Firebird;
@ -175,6 +177,394 @@ static bool set_security_class(thread_db*, Record*, USHORT);
static void set_system_flag(thread_db*, Record*, USHORT);
static void verb_post(thread_db*, jrd_tra*, record_param*, Record*);
namespace Jrd
{
// Parallel database sweep task. Distributes the sweep of all relations across
// up to att_parallel_workers worker items; each worker processes one range of
// pointer pages (PPs) of one relation at a time. Work is handed out by
// GetWorkItem() under m_mutex and executed by Handler().
class SweepTask : public Task
{
struct RelInfo; // forward decl
public:
// tdbb/pool: context and pool of the initiating attachment; traceSweep is
// stored for tracing (not otherwise used in the code visible here).
// Worker item 0 reuses the caller's attachment and transaction
// (m_ownAttach = false); the remaining items create their own.
SweepTask(thread_db* tdbb, MemoryPool* pool, TraceSweepEvent* traceSweep) : Task(),
m_pool(pool),
m_dbb(NULL),
m_trace(traceSweep),
m_items(*m_pool),
m_stop(false),
m_nextRelID(0),
m_lastRelID(0),
m_relInfo(*m_pool)
{
m_dbb = tdbb->getDatabase();
Attachment* att = tdbb->getAttachment();
int workers = 1;
if (att->att_parallel_workers > 0)
workers = att->att_parallel_workers;
for (int i = 0; i < workers; i++)
m_items.add(FB_NEW_POOL(*m_pool) Item(this));
// item 0 borrows the caller's attachment/transaction and must not release them
m_items[0]->m_ownAttach = false;
m_items[0]->m_attStable = att->getStable();
m_items[0]->m_tra = tdbb->getTransaction();
m_relInfo.grow(m_items.getCount());
// relation IDs [0, m_lastRelID) are candidates for sweeping
m_lastRelID = att->att_relations->count();
};
virtual ~SweepTask()
{
for (Item** p = m_items.begin(); p < m_items.end(); p++)
delete *p;
};
// One unit of worker state: an (own or borrowed) attachment, its sweep
// transaction, and the currently assigned relation/PP range.
class Item : public Task::WorkItem
{
public:
Item(SweepTask* task) : Task::WorkItem(task),
m_inuse(false),
m_ownAttach(true),
m_tra(NULL),
m_relInfo(NULL),
m_firstPP(0),
m_lastPP(0)
{}
// For self-owned items only: commit the sweep transaction and release
// the worker attachment. Borrowed state (item 0) is left untouched.
virtual ~Item()
{
if (!m_ownAttach || !m_attStable)
return;
Attachment* att = NULL;
{
AttSyncLockGuard guard(*m_attStable->getSync(), FB_FUNCTION);
att = m_attStable->getHandle();
if (!att)
return;
fb_assert(att->att_use_count > 0);
}
FbLocalStatus status;
if (m_tra)
{
BackgroundContextHolder tdbb(att->att_database, att, &status, FB_FUNCTION);
TRA_commit(tdbb, m_tra, false);
}
WorkerAttachment::releaseAttachment(&status, m_attStable);
}
SweepTask* getSweepTask() const
{
return reinterpret_cast<SweepTask*> (m_task);
}
// Bind this worker's attachment and transaction to the calling thread's
// context. Lazily creates the worker attachment and a read-committed
// sweep transaction on first use. Returns false (with status set) on
// failure; marks the thread as a sweeper via TDBB_sweeper.
bool init(thread_db* tdbb)
{
FbStatusVector* status = tdbb->tdbb_status_vector;
Attachment* att = NULL;
if (m_ownAttach && !m_attStable.hasData())
m_attStable = WorkerAttachment::getAttachment(status, getSweepTask()->m_dbb);
if (m_attStable)
att = m_attStable->getHandle();
if (!att)
{
Arg::Gds(isc_bad_db_handle).copyTo(status);
return false;
}
tdbb->setDatabase(att->att_database);
tdbb->setAttachment(att);
if (m_ownAttach && !m_tra)
{
// read-only, read-committed: the sweep must not block other activity
const UCHAR sweep_tpb[] =
{
isc_tpb_version1, isc_tpb_read,
isc_tpb_read_committed, isc_tpb_rec_version
};
try
{
WorkerContextHolder holder(tdbb, FB_FUNCTION);
m_tra = TRA_start(tdbb, sizeof(sweep_tpb), sweep_tpb);
DPM_scan_pages(tdbb);
}
catch(const Exception& ex)
{
ex.stuffException(tdbb->tdbb_status_vector);
return false;
}
}
tdbb->setTransaction(m_tra);
tdbb->tdbb_flags |= TDBB_sweeper;
return true;
}
bool m_inuse; // item currently claimed by a worker (guarded by SweepTask::m_mutex)
bool m_ownAttach; // true: item owns its attachment/transaction and releases them in dtor
RefPtr<StableAttachmentPart> m_attStable;
jrd_tra* m_tra;
// part of work: relation, first and last PP's to work on
RelInfo* m_relInfo;
ULONG m_firstPP;
ULONG m_lastPP;
};
bool Handler(WorkItem& _item);
bool GetWorkItem(WorkItem** pItem);
// Copy the accumulated task status into 'status' (if given);
// returns true when the whole sweep finished without errors.
bool GetResult(IStatus* status)
{
if (status)
{
status->init();
status->setErrors(m_status.getErrors());
}
return m_status.isSuccess();
}
int GetMaxWorkers()
{
return m_items.getCount();
}
private:
// item is handled, get next portion of work and update RelInfo
// also, detect if relation is handled completely
// return true if there is some more work to do
// NOTE(review): each call currently hands out a single pointer page
// (m_lastPP == m_firstPP); callers must invoke this under m_mutex.
bool updateRelInfo(Item* item)
{
RelInfo* relInfo = item->m_relInfo;
if (relInfo->countPP == 0 || relInfo->nextPP >= relInfo->countPP)
{
// relation exhausted (or its PP count not yet known) - drop this worker
relInfo->workers--;
return false;
}
item->m_firstPP = relInfo->nextPP;
item->m_lastPP = item->m_firstPP;
if (item->m_lastPP >= relInfo->countPP)
item->m_lastPP = relInfo->countPP - 1;
relInfo->nextPP = item->m_lastPP + 1;
return true;
}
// Record the first error seen and/or request that all workers stop.
// Only the first error status is kept; later ones are ignored.
void setError(IStatus* status, bool stopTask)
{
const bool copyStatus = (m_status.isSuccess() && status && status->getState() == IStatus::STATE_ERRORS);
if (!copyStatus && (!stopTask || m_stop))
return;
MutexLockGuard guard(m_mutex, FB_FUNCTION);
if (m_status.isSuccess() && copyStatus)
m_status.save(status);
if (stopTask)
m_stop = true;
}
MemoryPool* m_pool;
Database* m_dbb;
TraceSweepEvent* m_trace;
Mutex m_mutex; // guards work distribution (m_items flags, m_relInfo, m_status)
HalfStaticArray<Item*, 8> m_items;
StatusHolder m_status; // first error reported by any worker
volatile bool m_stop; // set to make all workers stop early
// Per-relation progress shared by all workers sweeping that relation.
// NOTE(review): countPP is also written from Handler() without m_mutex
// once the relation is opened - presumably safe because other workers
// only join after countPP becomes non-zero; verify against Task framework.
struct RelInfo
{
RelInfo()
{
memset(this, 0, sizeof(*this));
}
USHORT rel_id;
ULONG countPP; // number of pointer pages in relation
ULONG nextPP; // number of PP to assign to next worker
ULONG workers; // number of workers for this relation
};
USHORT m_nextRelID; // next relation to work on
USHORT m_lastRelID; // last relation to work on
HalfStaticArray<RelInfo, 8> m_relInfo; // relations worked on
};
// Worker entry point: sweep the pointer-page range [m_firstPP, m_lastPP] of
// the relation assigned to 'item'. Runs on a worker thread with its own
// thread_db context. Returns false (and stops the whole task via setError)
// on any failure; returns !m_stop otherwise so a requested stop ends the task.
bool SweepTask::Handler(WorkItem& _item)
{
Item* item = reinterpret_cast<Item*>(&_item);
// fresh engine context for this thread; item->init() attaches it
ThreadContextHolder tdbb(NULL);
if (!item->init(tdbb))
{
setError(tdbb->tdbb_status_vector, true);
return false;
}
WorkerContextHolder wrkHolder(tdbb, FB_FUNCTION);
record_param rpb;
jrd_rel* relation = NULL;
try
{
RelInfo* relInfo = item->m_relInfo;
Database* dbb = tdbb->getDatabase();
Attachment* att = tdbb->getAttachment();
/*relation = (*att->att_relations)[relInfo->rel_id];
if (relation)*/
relation = MET_lookup_relation_id(tdbb, relInfo->rel_id, false);
// skip relations that are gone, being dropped, temporary, or have no pages
if (relation &&
!(relation->rel_flags & (REL_deleted | REL_deleting)) &&
!relation->isTemporary() &&
relation->getPages(tdbb)->rel_pages)
{
// garbage collection must be allowed on this relation to sweep it
jrd_rel::GCShared gcGuard(tdbb, relation);
if (!gcGuard.gcEnabled())
{
string str;
str.printf("Acquire garbage collection lock failed (%s)", relation->rel_name.c_str());
status_exception::raise(Arg::Gds(isc_random) << Arg::Str(str));
}
jrd_tra* tran = tdbb->getTransaction();
// first worker on this relation publishes its pointer-page count
if (relInfo->countPP == 0)
relInfo->countPP = relation->getPages(tdbb)->rel_pages->count();
rpb.rpb_relation = relation;
rpb.rpb_org_scans = relation->rel_scan_count++;
rpb.rpb_record = NULL;
rpb.rpb_stream_flags = RPB_s_no_data | RPB_s_sweeper;
rpb.getWindow(tdbb).win_flags = WIN_large_scan;
// position just before the first record of m_firstPP...
rpb.rpb_number.compose(dbb->dbb_max_records, dbb->dbb_dp_per_pp, 0, 0, item->m_firstPP);
rpb.rpb_number.decrement();
// ...and stop before the first record of the PP after m_lastPP
RecordNumber lastRecNo;
lastRecNo.compose(dbb->dbb_max_records, dbb->dbb_dp_per_pp, 0, 0, item->m_lastPP + 1);
lastRecNo.decrement();
// walking the records as a sweeper performs the actual garbage collection
while (VIO_next_record(tdbb, &rpb, tran, NULL, DPM_next_pointer_page))
{
CCH_RELEASE(tdbb, &rpb.getWindow(tdbb));
if (relation->rel_flags & REL_deleting)
break;
if (rpb.rpb_number >= lastRecNo)
break;
if (m_stop)
break;
JRD_reschedule(tdbb);
// keep the sweep transaction's view of the oldest snapshot current
tran->tra_oldest_active = dbb->dbb_oldest_snapshot;
}
delete rpb.rpb_record;
--relation->rel_scan_count;
}
return !m_stop;
}
catch(const Exception& ex)
{
// record the failure and fall through to stop the whole task
ex.stuffException(tdbb->tdbb_status_vector);
delete rpb.rpb_record;
if (relation)
{
if (relation->rel_scan_count) {
--relation->rel_scan_count;
}
}
}
setError(tdbb->tdbb_status_vector, true);
return false;
}
// Hand out the next portion of work, serialized by m_mutex.
// Three paths, in order:
// 1) *pItem already holds an item that just finished a portion: try to give
// it the next PP range of the same relation (updateRelInfo).
// 2) All relation IDs already started: let the item help another relation
// that still has unassigned pointer pages.
// 3) Otherwise start the next relation ID with this item.
// Returns false (and releases the item) when no work remains.
bool SweepTask::GetWorkItem(WorkItem** pItem)
{
MutexLockGuard guard(m_mutex, FB_FUNCTION);
Item* item = reinterpret_cast<Item*> (*pItem);
if (item == NULL)
{
// first call by this worker: claim a free item slot
for (Item** p = m_items.begin(); p < m_items.end(); p++)
if (!(*p)->m_inuse)
{
(*p)->m_inuse = true;
*pItem = item = *p;
break;
}
}
else if (updateRelInfo(item))
return true;
if (!item)
return false;
// assign part of task to item
if (m_nextRelID >= m_lastRelID)
{
// find not handled relation and help to handle it
RelInfo* relInfo = m_relInfo.begin();
for (; relInfo < m_relInfo.end(); relInfo++)
if (relInfo->workers > 0)
{
item->m_relInfo = relInfo;
relInfo->workers++;
// updateRelInfo undoes the workers++ when nothing is left
if (updateRelInfo(item))
return true;
}
item->m_inuse = false;
return false;
}
// start to handle next relation
USHORT relID = m_nextRelID++;
// reuse a RelInfo slot whose relation is fully handled (workers == 0)
RelInfo* relInfo = m_relInfo.begin();
for (; relInfo < m_relInfo.end(); relInfo++)
if (relInfo->workers == 0)
{
relInfo->workers++;
relInfo->rel_id = relID;
relInfo->countPP = 0; // unknown until Handler opens the relation
item->m_relInfo = relInfo;
item->m_firstPP = item->m_lastPP = 0; // first worker takes PP 0
relInfo->nextPP = item->m_lastPP + 1;
return true;
}
// no free RelInfo slot - should not happen since slots == items count
item->m_inuse = false;
return false;
}
}; // namespace Jrd
static bool assert_gc_enabled(const jrd_tra* transaction, const jrd_rel* relation)
{
/**************************************
@ -3275,7 +3665,7 @@ bool VIO_next_record(thread_db* tdbb,
record_param* rpb,
jrd_tra* transaction,
MemoryPool* pool,
bool onepage)
FindNextRecordScope scope)
{
/**************************************
*
@ -3311,7 +3701,7 @@ bool VIO_next_record(thread_db* tdbb,
#endif
do {
if (!DPM_next(tdbb, rpb, lock_type, onepage))
if (!DPM_next(tdbb, rpb, lock_type, scope))
{
return false;
}
@ -3899,6 +4289,25 @@ bool VIO_sweep(thread_db* tdbb, jrd_tra* transaction, TraceSweepEvent* traceSwee
DPM_scan_pages(tdbb);
if (attachment->att_parallel_workers != 0)
{
EngineCheckout cout(tdbb, FB_FUNCTION);
Coordinator coord(dbb->dbb_permanent);
SweepTask sweep(tdbb, dbb->dbb_permanent, traceSweep);
FbLocalStatus local_status;
local_status->init();
coord.RunSync(&sweep);
if (!sweep.GetResult(&local_status))
local_status.raise();
return true;
}
// hvlad: restore tdbb->transaction since it can be used later
tdbb->setTransaction(transaction);
@ -3943,7 +4352,7 @@ bool VIO_sweep(thread_db* tdbb, jrd_tra* transaction, TraceSweepEvent* traceSwee
gc->sweptRelation(transaction->tra_oldest_active, relation->rel_id);
}
while (VIO_next_record(tdbb, &rpb, transaction, 0, false))
while (VIO_next_record(tdbb, &rpb, transaction, 0, DPM_next_all))
{
CCH_RELEASE(tdbb, &rpb.getWindow(tdbb));
@ -3966,7 +4375,7 @@ bool VIO_sweep(thread_db* tdbb, jrd_tra* transaction, TraceSweepEvent* traceSwee
delete rpb.rpb_record;
} // try
catch (const Firebird::Exception&)
catch (const Exception&)
{
delete rpb.rpb_record;
@ -4892,7 +5301,7 @@ void Database::garbage_collector(Database* dbb)
bool rel_exit = false;
while (VIO_next_record(tdbb, &rpb, transaction, NULL, true))
while (VIO_next_record(tdbb, &rpb, transaction, NULL, DPM_next_data_page))
{
CCH_RELEASE(tdbb, &rpb.getWindow(tdbb));

View File

@ -26,7 +26,8 @@
#ifndef JRD_VIO_PROTO_H
#define JRD_VIO_PROTO_H
namespace Jrd {
namespace Jrd
{
class jrd_rel;
class jrd_tra;
class Record;
@ -35,6 +36,13 @@ namespace Jrd {
class Savepoint;
class Format;
class TraceSweepEvent;
// Scope of a VIO_next_record()/DPM_next() scan: how far past the current
// position the search may advance (replaces the old bool 'onepage' flag).
enum FindNextRecordScope
{
	DPM_next_all,			// all pages
	DPM_next_data_page,		// one data page only
	DPM_next_pointer_page	// data pages from one pointer page
};
}
void VIO_backout(Jrd::thread_db*, Jrd::record_param*, const Jrd::jrd_tra*);
@ -52,7 +60,7 @@ bool VIO_get_current(Jrd::thread_db*, Jrd::record_param*, Jrd::jrd_tra*,
void VIO_init(Jrd::thread_db*);
bool VIO_writelock(Jrd::thread_db*, Jrd::record_param*, Jrd::jrd_tra*);
bool VIO_modify(Jrd::thread_db*, Jrd::record_param*, Jrd::record_param*, Jrd::jrd_tra*);
bool VIO_next_record(Jrd::thread_db*, Jrd::record_param*, Jrd::jrd_tra*, MemoryPool*, bool);
bool VIO_next_record(Jrd::thread_db*, Jrd::record_param*, Jrd::jrd_tra*, MemoryPool*, Jrd::FindNextRecordScope);
Jrd::Record* VIO_record(Jrd::thread_db*, Jrd::record_param*, const Jrd::Format*, MemoryPool*);
bool VIO_refetch_record(Jrd::thread_db*, Jrd::record_param*, Jrd::jrd_tra*, bool, bool);
void VIO_store(Jrd::thread_db*, Jrd::record_param*, Jrd::jrd_tra*);

View File

@ -2456,6 +2456,7 @@ void DatabaseAuth::accept(PACKET* send, Auth::WriterImplementation* authBlock)
// remove tags for specific internal attaches
case isc_dpb_map_attach:
case isc_dpb_sec_attach:
case isc_dpb_worker_attach:
// remove client's config information
case isc_dpb_config:

View File

@ -423,6 +423,8 @@ const SvcSwitches backupOptions[] =
{"bkp_keyname", putStringArgument, 0, isc_spb_bkp_keyname, 0 },
{"bkp_crypt", putStringArgument, 0, isc_spb_bkp_crypt, 0 },
{"bkp_zip", putOption, 0, isc_spb_bkp_zip, 0 },
{"bkp_parallel_workers", putIntArgument, 0, isc_spb_bkp_parallel_workers, 0},
{"bkp_direct_io", putOption, 0, isc_spb_bkp_direct_io, 0},
{0, 0, 0, 0, 0}
};
@ -453,6 +455,7 @@ const SvcSwitches restoreOptions[] =
{"res_keyname", putStringArgument, 0, isc_spb_res_keyname, 0 },
{"res_crypt", putStringArgument, 0, isc_spb_res_crypt, 0 },
{"res_replica_mode", putReplicaMode, 0, isc_spb_res_replica_mode, 0},
{"res_parallel_workers", putIntArgument, 0, isc_spb_res_parallel_workers, 0},
{0, 0, 0, 0, 0}
};
@ -498,6 +501,7 @@ const SvcSwitches repairOptions[] =
{"rpr_sweep_db", putOption, 0, isc_spb_rpr_sweep_db, 0},
{"rpr_list_limbo_trans", putOption, 0, isc_spb_rpr_list_limbo_trans, isc_info_svc_limbo_trans},
{"rpr_icu", putOption, 0, isc_spb_rpr_icu, 0},
{"rpr_par_workers", putIntArgument, 0, isc_spb_rpr_par_workers, 0},
{0, 0, 0, 0, 0}
};