8
0
mirror of https://github.com/FirebirdSQL/firebird.git synced 2025-01-22 18:43:02 +01:00

Merge pull request #7212 from FirebirdSQL/work/parallel_v5

Work/parallel v5
This commit is contained in:
Vlad Khorsun 2022-06-21 11:46:49 +03:00 committed by GitHub
commit 6325174cfc
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
63 changed files with 6730 additions and 1477 deletions

View File

@ -1057,6 +1057,36 @@
#SecurityDatabase = $(dir_secDb)/security5.fdb
# ============================
# Settings for parallel work
# ============================
#
# Limits the total number of parallel workers that could be created within a
# single Firebird process for each attached database.
# Note, workers are accounted for each attached database independently.
#
# Valid values are from 1 (no parallelism) to 64. All other values are
# silently ignored and the default value of 1 is used.
# Per-process.
#
# Type: integer
#
#MaxParallelWorkers = 1
#
# Default number of parallel workers for the single task. For more details
# see doc/README.parallel_features.
#
# Valid values are from 1 (no parallelism) to MaxParallelWorkers (above).
# Values less than 1 are silently ignored and the default value of 1 is used.
# Per-process.
#
# Type: integer
#
#ParallelWorkers = 1
# ==============================
# Settings for Windows platforms
# ==============================

View File

@ -139,6 +139,7 @@
</ItemDefinitionGroup>
<ItemGroup>
<ClCompile Include="..\..\..\src\burp\burp.cpp" />
<ClCompile Include="..\..\..\src\burp\BurpTasks.cpp" />
<ClCompile Include="..\..\..\src\burp\canonical.cpp" />
<ClCompile Include="..\..\..\src\burp\misc.cpp" />
<ClCompile Include="..\..\..\src\burp\mvol.cpp" />
@ -154,6 +155,7 @@
<ItemGroup>
<ClInclude Include="..\..\..\src\burp\backu_proto.h" />
<ClInclude Include="..\..\..\src\burp\burp.h" />
<ClInclude Include="..\..\..\src\burp\BurpTasks.h" />
<ClInclude Include="..\..\..\src\burp\burp_proto.h" />
<ClInclude Include="..\..\..\src\burp\burpswi.h" />
<ClInclude Include="..\..\..\src\burp\canon_proto.h" />

View File

@ -39,6 +39,9 @@
<ClCompile Include="..\..\..\gen\burp\restore.cpp">
<Filter>BURP files\Generated files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\src\burp\BurpTasks.cpp">
<Filter>BURP files</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<None Include="..\..\..\src\burp\backup.epp">
@ -79,5 +82,8 @@
<ClInclude Include="..\..\..\src\burp\resto_proto.h">
<Filter>Header files</Filter>
</ClInclude>
<ClInclude Include="..\..\..\src\burp\BurpTasks.h">
<Filter>Header files</Filter>
</ClInclude>
</ItemGroup>
</Project>

View File

@ -94,6 +94,7 @@
<ClCompile Include="..\..\..\src\common\StatementMetadata.cpp" />
<ClCompile Include="..\..\..\src\common\StatusArg.cpp" />
<ClCompile Include="..\..\..\src\common\StatusHolder.cpp" />
<ClCompile Include="..\..\..\src\common\Task.cpp" />
<ClCompile Include="..\..\..\src\common\TextType.cpp" />
<ClCompile Include="..\..\..\src\common\ThreadData.cpp" />
<ClCompile Include="..\..\..\src\common\ThreadStart.cpp" />
@ -211,6 +212,7 @@
<ClInclude Include="..\..\..\src\common\StatusArg.h" />
<ClInclude Include="..\..\..\src\common\StatusHolder.h" />
<ClInclude Include="..\..\..\src\common\stuff.h" />
<ClInclude Include="..\..\..\src\common\Task.h" />
<ClInclude Include="..\..\..\src\common\TextType.h" />
<ClInclude Include="..\..\..\src\common\ThreadData.h" />
<ClInclude Include="..\..\..\src\common\ThreadStart.h" />

View File

@ -249,6 +249,9 @@
<ClCompile Include="..\..\..\src\common\Int128.cpp">
<Filter>common</Filter>
</ClCompile>
<ClCompile Include="..\..\..\src\common\Task.cpp">
<Filter>common</Filter>
</ClCompile>
<ClCompile Include="..\..\..\src\common\classes\TimerImpl.cpp">
<Filter>classes</Filter>
</ClCompile>
@ -602,6 +605,9 @@
<ClInclude Include="..\..\..\src\common\Int128.h">
<Filter>headers</Filter>
</ClInclude>
<ClInclude Include="..\..\..\src\common\Task.h">
<Filter>headers</Filter>
</ClInclude>
<ClInclude Include="..\..\..\src\common\classes\TimerImpl.h">
<Filter>headers</Filter>
</ClInclude>

View File

@ -173,6 +173,7 @@
<ClCompile Include="..\..\..\src\jrd\validation.cpp" />
<ClCompile Include="..\..\..\src\jrd\vio.cpp" />
<ClCompile Include="..\..\..\src\jrd\VirtualTable.cpp" />
<ClCompile Include="..\..\..\src\jrd\WorkerAttachment.cpp" />
<ClCompile Include="..\..\..\src\lock\lock.cpp" />
<ClCompile Include="..\..\..\src\utilities\gsec\gsec.cpp" />
<ClCompile Include="..\..\..\src\utilities\gstat\ppg.cpp" />
@ -359,6 +360,7 @@
<ClInclude Include="..\..\..\src\jrd\vio_debug.h" />
<ClInclude Include="..\..\..\src\jrd\vio_proto.h" />
<ClInclude Include="..\..\..\src\jrd\VirtualTable.h" />
<ClInclude Include="..\..\..\src\jrd\WorkerAttachment.h" />
</ItemGroup>
<ItemGroup>
<None Include="..\..\..\src\dsql\DdlNodes.epp" />

View File

@ -522,6 +522,9 @@
<ClCompile Include="..\..\..\src\jrd\optimizer\Retrieval.cpp">
<Filter>Optimizer</Filter>
</ClCompile>
<ClCompile Include="..\..\..\src\jrd\WorkerAttachment.cpp">
<Filter>JRD files</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="..\..\..\src\jrd\recsrc\RecordSource.h">
@ -1064,6 +1067,9 @@
<ClInclude Include="..\..\..\src\jrd\QualifiedName.h">
<Filter>Header files</Filter>
</ClInclude>
<ClInclude Include="..\..\..\src\jrd\WorkerAttachment.h">
<Filter>Header files</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<None Include="..\..\..\src\dsql\DdlNodes.epp">

View File

@ -1,4 +1,9 @@
In Firebird 4.0 a new switch was added to gbak: -INCLUDE(_DATA).
gbak enhancements in Firebird v4.
---------------------------------
A new switch was added to gbak: -INCLUDE(_DATA).
Author: Dimitry Sibiryakov <sd at ibphoenix com>
It takes one parameter which is "similar like" pattern matching
table names in a case-insensitive way.
@ -17,3 +22,93 @@ a table is following:
| MATCH | excluded | excluded | excluded |
| NOT MATCH | included | included | excluded |
+-----------+------------+------------+------------+
gbak enhancements in Firebird v5.
---------------------------------
1. Parallel execution.
Author: Vladyslav Khorsun <hvlad at users sourceforge net>
a) gbak backup
Backup could read source database tables using multiple threads in parallel.
New switch
-PAR(ALLEL) parallel workers
set number of workers that should be used for backup process. Default is 1.
Every additional worker creates own thread and own new connection used to read
data in parallel with other workers. All worker connections share the same
database snapshot to ensure a consistent data view across all of them. Workers
are created and managed by gbak itself. Note: metadata is still read by a
single thread.
b) gbak restore
Restore could put data into user tables using multiple threads in parallel.
New switch
-PAR(ALLEL) parallel workers
set number of workers that should be used for restore process. Default is 1.
Every additional worker creates own thread and own new connection used to load
data in parallel with other workers. Metadata is still created using single
thread. Also, "main" connection uses DPB tag isc_dpb_parallel_workers to pass
the value of switch -PARALLEL to the engine - it allows to use engine ability
to build indices in parallel. If -PARALLEL switch is not used gbak will load
data using single thread and will not use DPB tag isc_dpb_parallel_workers. In
this case engine will use value of ParallelWorkers setting when building
indices, i.e. this phase could be run in parallel by the engine itself. To
fully avoid parallel operations when restoring database, use -PARALLEL 1.
Note: gbak itself does not read firebird.conf, so the ParallelWorkers setting
does not affect its operations.
Examples.
Set in firebird.conf ParallelWorkers = 4, MaxParallelWorkers = 8 and restart
Firebird server.
a) backup using 2 parallel workers
gbak -b <database> <backup> -parallel 2
Here gbak will read user data using 2 connections and 2 threads.
b) restore using 2 parallel workers
gbak -r <backup> <database> -parallel 2
Here gbak will put user data using 2 connections and 2 threads. Also,
engine will build indices using 2 connections and 2 threads.
c) restore using no parallel workers but let engine to decide how many worker
should be used to build indices
gbak -r <backup> <database>
Here gbak will put user data using a single connection. The engine will build
indices using 4 connections and 4 threads as set by ParallelWorkers.
d) restore using no parallel workers and not allow engine build indices in
parallel
gbak -r <backup> <database> -par 1
2. Direct IO for backup files.
New switch
-D(IRECT_IO) direct IO for backup file(s)
instructs gbak to open/create backup file(s) in direct IO (unbuffered) mode.
It allows to not consume file system cache memory for backup files. Usually
a backup is read (by restore) or written (by backup) just once, so there is
little use in caching its contents. Performance should not suffer as gbak uses
sequential IO with relatively big chunks.
Direct IO mode is silently ignored if backup file is redirected into standard
input\output, i.e. if "stdin"\"stdout" is used as backup file name.

View File

@ -0,0 +1,80 @@
Firebird engine parallel features in v5.
----------------------------------------
Author: Vladyslav Khorsun <hvlad at users sourceforge net>
The Firebird engine can now execute some tasks using multiple threads in
parallel. Currently parallel execution is implemented for the sweep and the
index creation tasks. Parallel execution is supported for both auto- and manual
sweep.
To handle same task by multiple threads engine runs additional worker threads
and creates internal worker attachments. By default, parallel execution is not
enabled. There are two ways to enable parallelism in user attachment:
- set number of parallel workers in DPB using new tag isc_dpb_parallel_workers,
- set default number of parallel workers using new setting ParallelWorkers in
firebird.conf.
For gfix utility there is new command-line switch -parallel that allows to
set number of parallel workers for the sweep task. For example:
gfix -sweep -parallel 4 <database>
will run sweep on given database and ask engine to use 4 workers. gfix uses DPB
tag isc_dpb_parallel_workers when attaches to <database>, if switch -parallel
is present.
New firebird.conf setting ParallelWorkers set default number of parallel
workers that can be used by any user attachment running parallelizable task.
Default value is 1 and means no use of additional parallel workers. Value in
DPB has higher priority than the setting in firebird.conf.
To control number of additional workers that can be created by the engine
there are two new settings in firebird.conf:
- ParallelWorkers - set default number of parallel workers that used by user
attachments.
Can be overridden by an attachment using the tag isc_dpb_parallel_workers in DPB.
- MaxParallelWorkers - limit number of simultaneously used workers for the
given database and Firebird process.
Internal worker attachments are created and managed by the engine itself.
Engine maintains per-database pools of worker attachments. Number of items in
each of such pool is limited by value of MaxParallelWorkers setting. The pools
are created by each Firebird process independently.
In Super Server architecture worker attachments are implemented as light-
weight system attachments, while in Classic and Super Classic they look like
usual user attachments. All worker attachments are embedded into creating
server process. Thus in Classic architectures there is no additional server
processes. Worker attachments are present in monitoring tables. Idle worker
attachment is destroyed after 60 seconds of inactivity. Also, in Classic
architectures worker attachments are destroyed immediately after last user
connection detached from database.
Examples:
Set in firebird.conf ParallelWorkers = 4, MaxParallelWorkers = 8 and restart
Firebird server.
a) Connect to test database not using isc_dpb_parallel_workers in DPB and
execute "CREATE INDEX ..." SQL statement. On commit the index will be actually
created and engine will use 3 additional worker attachments. In total, 4
attachments in 4 threads will work on index creation.
b) Ensure auto-sweep is enabled for test database. When auto-sweep will run on
that database, it also will use 3 additional workers (and run within 4 threads).
c) more than one single task at time could be parallelized: make 2 attachments
and execute "CREATE INDEX ..." in each of them (of course indices to be built
should be different). Each index will be created using 4 attachments (1 user
and 3 worker) and 4 threads.
d) run gfix -sweep <database> - not specifying switch -parallel: sweep will run
using 4 attachments in 4 threads.
e) run gfix -sweep -parallel 2 <database>: sweep will run using 2 attachments in
2 threads. This shows that value in DPB tag isc_dpb_parallel_workers overrides
value of setting ParallelWorkers.

View File

@ -272,6 +272,18 @@ int alice(Firebird::UtilSvc* uSvc)
}
}
if (table->in_sw_value & sw_parallel_workers)
{
if (--argc <= 0) { // TODO: error message!
ALICE_error(6); // msg 6: number of page buffers for cache required
}
ALICE_upper_case(*argv++, string, sizeof(string));
if ((!(tdgbl->ALICE_data.ua_parallel_workers = atoi(string))) && (strcmp(string, "0")))
{
ALICE_error(7); // msg 7: numeric value required
}
}
if (table->in_sw_value & sw_housekeeping)
{
if (--argc <= 0) {

View File

@ -93,6 +93,7 @@ struct user_action
USHORT ua_db_SQL_dialect;
alice_shut_mode ua_shutdown_mode;
alice_repl_mode ua_replica_mode;
SSHORT ua_parallel_workers;
};

View File

@ -60,6 +60,7 @@ const SINT64 sw_buffers = 0x0000000020000000L;
const SINT64 sw_mode = 0x0000000040000000L;
const SINT64 sw_set_db_dialect = 0x0000000080000000L;
const SINT64 sw_trusted_auth = QUADCONST(0x0000000100000000); // Byte 4, Bit 0
const SINT64 sw_parallel_workers= QUADCONST(0x0000000200000000);
const SINT64 sw_fetch_password = QUADCONST(0x0000000800000000);
const SINT64 sw_nolinger = QUADCONST(0x0000001000000000);
const SINT64 sw_icu = QUADCONST(0x0000002000000000);
@ -124,7 +125,8 @@ enum alice_switches
IN_SW_ALICE_NOLINGER = 47,
IN_SW_ALICE_ICU = 48,
IN_SW_ALICE_ROLE = 49,
IN_SW_ALICE_REPLICA = 50
IN_SW_ALICE_REPLICA = 50,
IN_SW_ALICE_PARALLEL_WORKERS = 51
};
static const char* const ALICE_SW_ASYNC = "ASYNC";
@ -213,6 +215,9 @@ static const Switches::in_sw_tab_t alice_in_sw_table[] =
{IN_SW_ALICE_PROMPT, 0, "PROMPT", sw_prompt,
sw_list, 0, false, false, 41, 2, NULL},
// msg 41: \t-prompt\t\tprompt for commit/rollback (-l)
{IN_SW_ALICE_PARALLEL_WORKERS, isc_spb_rpr_par_workers, "PARALLEL", sw_parallel_workers,
sw_sweep, 0, false, false, 136, 3, NULL},
// msg 136: -par(allel) parallel workers <n> (-sweep)
{IN_SW_ALICE_PASSWORD, 0, "PASSWORD", sw_password,
0, (sw_trusted_auth | sw_fetch_password),
false, false, 42, 2, NULL},

View File

@ -325,6 +325,10 @@ static void buildDpb(Firebird::ClumpletWriter& dpb, const SINT64 switches)
dpb.insertByte(isc_dpb_set_db_replica, tdgbl->ALICE_data.ua_replica_mode);
}
if (switches & sw_parallel_workers) {
dpb.insertInt(isc_dpb_parallel_workers, tdgbl->ALICE_data.ua_parallel_workers);
}
if (switches & sw_nolinger)
dpb.insertTag(isc_dpb_nolinger);

1128
src/burp/BurpTasks.cpp Normal file

File diff suppressed because it is too large Load Diff

611
src/burp/BurpTasks.h Normal file
View File

@ -0,0 +1,611 @@
/*
* PROGRAM: JRD Backup and Restore Program
* MODULE: BurpTasks.h
* DESCRIPTION:
*
* The contents of this file are subject to the Interbase Public
* License Version 1.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy
* of the License at http://www.Inprise.com/IPL.html
*
* Software distributed under the License is distributed on an
* "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either express
* or implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code was created by Khorsun Vladyslav
* for the Firebird Open Source RDBMS project.
*
* Copyright (c) 2019 Khorsun Vladyslav <hvlad@users.sourceforge.net>
* and all contributors signed below.
*
* All Rights Reserved.
* Contributor(s): ______________________________________.
*
*/
#ifndef BURP_TASKS_H
#define BURP_TASKS_H
#include <stdio.h>
#include "../common/common.h"
#include "../burp/burp.h"
#include "../common/ThreadData.h"
#include "../common/Task.h"
#include "../common/UtilSvc.h"
#include "../common/classes/array.h"
#include "../common/classes/auto.h"
#include "../common/classes/condition.h"
#include "../common/classes/fb_atomic.h"
namespace Burp {
// Per-relation description of the BLR request used to read table data
// during (parallel) backup. Built once per relation and then shared,
// read-only, by all reader work items.
class ReadRelationMeta
{
public:
	ReadRelationMeta() :
		m_blr(*getDefaultMemoryPool())
	{
		clear();
	}

	// Build the BLR and message layout for the given relation.
	// "partition" presumably selects the form of the request that reads a
	// bounded range of pointer pages (cf. ReadRelationReq::setParams) —
	// TODO confirm against BurpTasks.cpp.
	void setRelation(const burp_rel* relation, bool partition);
	void clear();

	// True when the generated request declares a separate input message,
	// i.e. it takes parameters before records can be fetched.
	bool haveInputs() const
	{
		return m_inMgsNum != m_outMgsNum;
	}

//private:
	const burp_rel* m_relation;
	SSHORT m_fldCount;		// number of fields selected from the relation
	SSHORT m_inMgsNum;		// BLR message number of the input message
	SSHORT m_outMgsNum;		// BLR message number of the output message
	Firebird::HalfStaticArray<UCHAR, 256> m_blr;	// generated BLR text
	ULONG m_outMsgLen;		// total length of the output message
	ULONG m_outRecLen;		// length of one record inside the output message
	ULONG m_outEofOffset;	// offset of the EOF indicator in the output message
};
// One compiled instance of the "read relation data" request, owned by a
// single reader work item. The shared layout lives in ReadRelationMeta;
// this class holds the per-connection IRequest and message buffers.
class ReadRelationReq
{
public:
	ReadRelationReq() :
		m_outMsg(*getDefaultMemoryPool())
	{
		m_relation = NULL;
		m_meta = NULL;
		memset(&m_inMgs, 0, sizeof(m_inMgs));
		m_eof = NULL;
		m_request = 0;
	}

	~ReadRelationReq()
	{
		clear();
	}

	// Rebind this request object to new shared metadata.
	void reset(const ReadRelationMeta* meta);
	void clear();

	// Compile the BLR from the metadata against the given attachment.
	void compile(Firebird::CheckStatusWrapper* status, Firebird::IAttachment* db);

	// Set the pointer-page range to read (fills m_inMgs).
	void setParams(ULONG loPP, ULONG hiPP);

	// Start execution in the given transaction.
	void start(Firebird::CheckStatusWrapper* status, Firebird::ITransaction* tran);

	// Fetch the next record into m_outMsg.
	void receive(Firebird::CheckStatusWrapper* status);

	// Release the underlying IRequest.
	void release(Firebird::CheckStatusWrapper* status);

	const ReadRelationMeta* getMeta() const
	{
		return m_meta;
	}

	// Raw output message of the last receive().
	const UCHAR* getData() const
	{
		return m_outMsg.begin();
	}

	// EOF flag inside the output message (valid after receive()).
	bool eof() const
	{
		return *m_eof;
	}

private:
	// Input message: bounds of the pointer-page range to scan.
	struct InMsg
	{
		ULONG loPP;
		ULONG hiPP;
	};

	const burp_rel* m_relation;
	const ReadRelationMeta* m_meta;		// shared, not owned
	InMsg m_inMgs;
	Firebird::Array<UCHAR> m_outMsg;	// buffer for the output message
	SSHORT* m_eof;						// points at the EOF field within m_outMsg
	Firebird::IRequest* m_request;
};
// Per-relation description of how restored records are written back:
// either through the IBatch API (batch mode) or through a compiled BLR
// store request (request mode). Shared by all writer work items, hence
// the mutex guarding lazy preparation.
class WriteRelationMeta
{
public:
	WriteRelationMeta() :
		m_blr(*getDefaultMemoryPool())
	{
		clear();
	}

	void setRelation(BurpGlobals* tdgbl, const burp_rel* relation);
	void clear();

	// Create a new IBatch on the given attachment using the prepared
	// statement/metadata (batch mode).
	Firebird::IBatch* createBatch(BurpGlobals* tdgbl, Firebird::IAttachment* att);

//private:
	// Lazily prepare batch-mode artifacts; returns success flag (m_batchOk).
	bool prepareBatch(BurpGlobals* tdgbl);
	// Lazily prepare request-mode BLR.
	void prepareRequest(BurpGlobals* tdgbl);

	const burp_rel* m_relation;
	Firebird::Mutex m_mutex;	// serializes lazy preparation across writers
	bool m_batchMode;			// true: use IBatch; false: use BLR request
	bool m_batchOk;				// batch preparation succeeded
	ULONG m_inMsgLen;			// length of one input record message

	// batch mode
	Firebird::string m_sqlStatement;
	Firebird::RefPtr<Firebird::IMessageMetadata> m_batchMeta;
	unsigned m_batchStep;
	unsigned m_batchInlineBlobLimit;

	// request mode
	SSHORT m_inMgsNum;			// BLR message number of the input message
	Firebird::HalfStaticArray<UCHAR, 256> m_blr;	// generated store BLR
};
// One compiled instance of the "write relation data" request, owned by a
// single writer work item during restore. Depending on the shared
// WriteRelationMeta it drives either an IBatch or a BLR store request.
class WriteRelationReq
{
public:
	WriteRelationReq() :
		m_inMsg(*getDefaultMemoryPool()),
		m_batchMsg(*getDefaultMemoryPool())
	{
		m_relation = nullptr;
		m_meta = nullptr;
		m_batch = nullptr;
		m_request = nullptr;
		m_recs = 0;
		m_resync = true;
	}

	~WriteRelationReq()
	{
		clear();
	}

	// Rebind this request object to new shared metadata.
	void reset(WriteRelationMeta* meta);
	void clear();

	// Prepare batch or request against the given attachment.
	void compile(BurpGlobals* tdgbl, Firebird::IAttachment* att);

	// Send the current record; "lastRec" marks the final record of the
	// relation (presumably flushes/executes pending batch work — TODO
	// confirm against BurpTasks.cpp).
	void send(BurpGlobals* tdgbl, Firebird::ITransaction* tran, bool lastRec);
	void release();

	ULONG getDataLength() const
	{
		return m_inMsg.getCount();
	}

	// Input message buffer for the record being written.
	UCHAR* getData()
	{
		return m_inMsg.begin();
	}

	Firebird::IBatch* getBatch() const
	{
		return m_batch;
	}

	ULONG getBatchMsgLength() const
	{
		return m_batchMsg.getCount();
	}

	UCHAR* getBatchMsgData()
	{
		return m_batchMsg.begin();
	}

	unsigned getBatchInlineBlobLimit() const
	{
		return m_meta->m_batchInlineBlobLimit;
	}

private:
	const burp_rel* m_relation;
	WriteRelationMeta* m_meta;			// shared, not owned
	Firebird::Array<UCHAR> m_inMsg;		// one record in request-mode layout
	Firebird::Array<UCHAR> m_batchMsg;	// one record in batch-mode layout
	Firebird::IBatch* m_batch;
	Firebird::IRequest* m_request;
	int m_recs;							// records accumulated/sent
	bool m_resync;
};
// forward declaration
class IOBuffer;

// Parallel backup of a single relation's data. One writer item streams
// records to the backup file while N reader items fetch records from the
// database over their own attachments. Readers fill "clean" buffers and
// hand them over as "dirty" buffers to the writer, which drains them and
// returns them to the clean pool (producer/consumer over IOBuffer lists,
// guarded by m_mutex and the two condition variables).
class BackupRelationTask : public Firebird::Task
{
public:
	BackupRelationTask(BurpGlobals* tdgbl);
	~BackupRelationTask();

	// Select the relation whose data will be backed up next.
	void SetRelation(burp_rel* relation);

	// Firebird::Task interface.
	bool handler(WorkItem& _item);
	bool getWorkItem(WorkItem** pItem);
	bool getResult(Firebird::IStatus* status);
	int getMaxWorkers();

	// Per-worker state: either the single file writer or one table reader.
	class Item : public Firebird::Task::WorkItem
	{
	public:
		Item(BackupRelationTask* task, bool writer) : WorkItem(task),
			m_inuse(false),
			m_writer(writer),
			m_ownAttach(!writer),	// readers attach on their own connection
			m_gbl(NULL),
			m_att(0),
			m_tra(0),
			m_relation(NULL),
			m_ppSequence(0),
			m_cleanBuffers(*getDefaultMemoryPool()),
			m_buffer(NULL)
		{}

		BackupRelationTask* getBackupTask() const
		{
			return reinterpret_cast<BackupRelationTask*> (m_task);
		}

		// RAII guard: unlocks the item's current buffer on scope exit.
		class EnsureUnlockBuffer
		{
		public:
			EnsureUnlockBuffer(Item* item) : m_item(item) {}
			~EnsureUnlockBuffer();

		private:
			Item* m_item;
		};

		bool m_inuse;
		bool m_writer;			// file writer or table reader
		bool m_ownAttach;
		BurpGlobals* m_gbl;
		Firebird::IAttachment* m_att;
		Firebird::ITransaction* m_tra;
		burp_rel* m_relation;
		ReadRelationReq m_request;
		ULONG m_ppSequence;		// PP to read
		Firebird::Mutex m_mutex;
		Firebird::HalfStaticArray<IOBuffer*, 2> m_cleanBuffers;
		IOBuffer* m_buffer;		// buffer currently being filled
		Firebird::Condition m_cleanCond;
	};

	// Task associated with the given globals (NULL when none — TODO confirm).
	static BackupRelationTask* getBackupTask(BurpGlobals* tdgbl);

	// Globals of the main (master) gbak thread.
	BurpGlobals* getMasterGbl() const
	{
		return m_masterGbl;
	}

	static void recordAdded(BurpGlobals* tdgbl);		// reader
	static IOBuffer* renewBuffer(BurpGlobals* tdgbl);	// reader

	bool isStopped() const
	{
		return m_stop;
	}

	// Serializes console/status output of concurrent workers.
	Firebird::Mutex burpOutMutex;

private:
	void initItem(BurpGlobals* tdgbl, Item& item);
	void freeItem(Item& item);
	void stopItems();

	bool fileWriter(Item& item);
	bool tableReader(Item& item);

	void releaseBuffer(Item& item);			// reader

	IOBuffer* getCleanBuffer(Item& item);	// reader
	void putDirtyBuffer(IOBuffer* buf);		// reader

	IOBuffer* getDirtyBuffer();				// writer
	void putCleanBuffer(IOBuffer* buf);		// writer

	BurpGlobals* m_masterGbl;
	burp_rel* m_relation;
	ReadRelationMeta m_metadata;	// shared request layout for all readers
	int m_readers;				// number of active readers, could be less than items allocated
	bool m_readDone;			// true when all readers are done
	ULONG m_nextPP;				// next pointer page to hand to a reader
	Firebird::Mutex m_mutex;
	Firebird::HalfStaticArray<Item*, 8> m_items;
	ISC_STATUS_ARRAY m_status;
	volatile bool m_stop;
	bool m_error;
	Firebird::HalfStaticArray<IOBuffer*, 16> m_buffers;		// all buffers, owned
	Firebird::HalfStaticArray<IOBuffer*, 8> m_dirtyBuffers;	// filled, awaiting writer
	Firebird::Condition m_dirtyCond;
};
// Parallel restore of a single relation's data. One reader item parses
// records from the backup file into buffers; N writer items drain those
// buffers and insert the records over their own attachments — the mirror
// image of BackupRelationTask's producer/consumer scheme.
class RestoreRelationTask : public Firebird::Task
{
public:
	RestoreRelationTask(BurpGlobals* tdgbl);
	~RestoreRelationTask();

	// Select the relation whose data will be restored next.
	void SetRelation(BurpGlobals* tdgbl, burp_rel* relation);

	// Firebird::Task interface.
	bool handler(WorkItem& _item);
	bool getWorkItem(WorkItem** pItem);
	bool getResult(Firebird::IStatus* status);
	int getMaxWorkers();

	// Per-worker state: either the single file reader or one table writer.
	class Item : public Firebird::Task::WorkItem
	{
	public:
		Item(RestoreRelationTask* task, bool reader) : WorkItem(task),
			m_inuse(false),
			m_reader(reader),
			m_ownAttach(!reader),	// writers attach on their own connection
			m_gbl(NULL),
			m_att(0),
			m_tra(0),
			m_relation(NULL),
			m_buffer(NULL)
		{}

		RestoreRelationTask* getRestoreTask() const
		{
			return reinterpret_cast<RestoreRelationTask*> (m_task);
		}

		// RAII guard: unlocks the item's current buffer on scope exit.
		class EnsureUnlockBuffer
		{
		public:
			EnsureUnlockBuffer(Item* item) : m_item(item) {}
			~EnsureUnlockBuffer();

		private:
			Item* m_item;
		};

		bool m_inuse;
		bool m_reader;			// file reader or table writer
		bool m_ownAttach;
		BurpGlobals* m_gbl;
		Firebird::IAttachment* m_att;
		Firebird::ITransaction* m_tra;
		burp_rel* m_relation;
		WriteRelationReq m_request;
		Firebird::Mutex m_mutex;
		IOBuffer* m_buffer;		// buffer currently being drained
	};

	// Exception used to signal "all records read" out of the reader path.
	class ExcReadDone : public Firebird::Exception
	{
	public:
		ExcReadDone() throw() : Firebird::Exception() { }
		virtual void stuffByException(Firebird::StaticStatusVector& status_vector) const throw();
		virtual const char* what() const throw();
		static void raise();
	};

	// Task associated with the given globals (NULL when none — TODO confirm).
	static RestoreRelationTask* getRestoreTask(BurpGlobals* tdgbl);

	// Globals of the main (master) gbak thread.
	BurpGlobals* getMasterGbl() const
	{
		return m_masterGbl;
	}

	static IOBuffer* renewBuffer(BurpGlobals* tdgbl);	// writer

	bool isStopped() const
	{
		return m_stop;
	}

	rec_type getLastRecord() const
	{
		return m_lastRecord;
	}

	// Verbose-mode record counters.
	void verbRecs(FB_UINT64& records, bool total);
	void verbRecsFinal();

	// commit and detach all worker connections
	bool finish();

	// Serializes console/status output of concurrent workers.
	Firebird::Mutex burpOutMutex;

private:
	void initItem(BurpGlobals* tdgbl, Item& item);
	bool freeItem(Item& item, bool commit);

	bool fileReader(Item& item);
	bool tableWriter(BurpGlobals* tdgbl, Item& item);

	void releaseBuffer(Item& item);		// writer

	// reader needs clean buffer to read backup file into
	IOBuffer* getCleanBuffer();			// reader
	// put buffer full of records to be handled by writer
	void putDirtyBuffer(IOBuffer* buf);	// reader

	IOBuffer* getDirtyBuffer();			// writer
	void putCleanBuffer(IOBuffer* buf);	// writer

	// Ensure "length" bytes are available, possibly chaining to a new buffer.
	void checkSpace(IOBuffer** pBuf, const FB_SIZE_T length, UCHAR** pData, FB_SIZE_T* pSpace);
	IOBuffer* read_blob(BurpGlobals* tdgbl, IOBuffer* ioBuf);
	IOBuffer* read_array(BurpGlobals* tdgbl, IOBuffer* ioBuf);

	BurpGlobals* m_masterGbl;
	burp_rel* m_relation;
	rec_type m_lastRecord;		// last backup record read for relation, usually rec_relation_end
	WriteRelationMeta m_metadata;	// shared write layout for all writers
	int m_writers;				// number of active writers, could be less than items allocated
	bool m_readDone;			// all records was read
	Firebird::Mutex m_mutex;
	Firebird::HalfStaticArray<Item*, 8> m_items;
	ISC_STATUS_ARRAY m_status;
	volatile bool m_stop;
	bool m_error;
	Firebird::AtomicCounter m_records;	// records restored for the current relation
	FB_UINT64 m_verbRecs;				// last records count reported
	Firebird::HalfStaticArray<IOBuffer*, 16> m_buffers;			// all buffers, owned
	Firebird::HalfStaticArray<IOBuffer*, 16> m_cleanBuffers;	// empty, awaiting reader
	Firebird::HalfStaticArray<IOBuffer*, 16> m_dirtyBuffers;	// filled, awaiting writers
	Firebird::Condition m_cleanCond;
	Firebird::Condition m_dirtyCond;
};
// Fixed-size, alignment-adjusted record buffer passed between reader and
// writer work items. Carries a record count, a used-bytes watermark, an
// optional link to the next buffer in a chain, and a recursive-style lock
// (m_locked counts nested lock() calls under m_mutex).
class IOBuffer
{
public:
	IOBuffer(void*, FB_SIZE_T size);

	UCHAR* getBuffer() const
	{
		return m_aligned;
	}

	FB_SIZE_T getSize() const
	{
		return m_size;
	}

	FB_SIZE_T getRecs() const
	{
		return m_recs;
	}

	FB_SIZE_T getUsed() const
	{
		return m_used;
	}

	void setUsed(FB_SIZE_T used)
	{
		fb_assert(used <= m_size);
		m_used = used;
	}

	// Reset content bookkeeping; buffer memory itself is untouched.
	void clear()
	{
		m_used = 0;
		m_recs = 0;
		m_next = NULL;
		m_linked = false;
	}

	void recordAdded()
	{
		m_recs++;
	}

	// Chain "buf" after this buffer (records spilling over buffer bounds).
	void linkNext(IOBuffer* buf)
	{
		m_next = buf;
		m_next->m_linked = true;
	}

	bool isLinked() const
	{
		return m_linked;
	}

	void lock()
	{
		m_mutex.enter(FB_FUNCTION);
		fb_assert(m_locked >= 0);
		m_locked++;
	}

	// NOTE(review): with opt == true this only unlocks when the mutex can be
	// re-entered by the current thread, i.e. when this thread holds the lock;
	// looks intentional (best-effort unlock) — confirm against callers.
	void unlock(bool opt = false)
	{
		if (opt) // unlock only if locked by me
		{
			if (m_locked == 0)
				return;

			if (!m_mutex.tryEnter(FB_FUNCTION))
				return;

			m_mutex.leave();
		}

		fb_assert(m_locked > 0);
		m_locked--;
		m_mutex.leave();
	}

	IOBuffer* getNext()
	{
		return m_next;
	}

	// Back-pointer to the owning work item (opaque here).
	void* getItem() const
	{
		return m_item;
	}

private:
	void* const m_item;
	Firebird::Array<UCHAR> m_memory;	// raw storage, owns the bytes
	UCHAR* m_aligned;					// aligned pointer into m_memory
	const FB_SIZE_T m_size;
	FB_SIZE_T m_used;
	FB_SIZE_T m_recs;
	IOBuffer* m_next;
	bool m_linked;
	int m_locked;						// nested lock() depth
	Firebird::Mutex m_mutex;
};
class BurpMaster
{
public:
BurpMaster()
{
m_tdgbl = BurpGlobals::getSpecific();
m_task = BackupRelationTask::getBackupTask(m_tdgbl);
if (!m_tdgbl->master)
m_tdgbl = m_task->getMasterGbl();
if (m_task)
m_task->burpOutMutex.enter(FB_FUNCTION);
}
~BurpMaster()
{
if (m_task)
m_task->burpOutMutex.leave();
}
BurpGlobals* get() const
{
return m_tdgbl;
}
private:
BackupRelationTask* m_task;
BurpGlobals* m_tdgbl;
};
} // namespace Burp
#endif // BURP_TASKS_H

View File

@ -60,9 +60,11 @@
#include "../common/classes/BlobWrapper.h"
#include "../common/classes/MsgPrint.h"
#include "../burp/OdsDetection.h"
#include "../burp/BurpTasks.h"
using MsgFormat::SafeArg;
using Firebird::FbLocalStatus;
using namespace Firebird;
using namespace Burp;
// For service APIs the follow DB handle is a value stored
@ -115,7 +117,7 @@ void put_array(burp_fld*, burp_rel*, ISC_QUAD*);
void put_asciz(const att_type, const TEXT*);
void put_blob(burp_fld*, ISC_QUAD&);
bool put_blr_blob(att_type, ISC_QUAD&);
void put_data(burp_rel*);
void put_data(burp_rel*, ReadRelationReq*);
void put_index(burp_rel*);
int put_message(att_type, att_type, const TEXT*, const ULONG);
void put_int32(att_type, SLONG);
@ -250,6 +252,53 @@ int BACKUP_backup(const TEXT* dbb_file, const TEXT* file_name)
EXEC SQL SET TRANSACTION NAME gds_trans;
}
// get shared snapshot number if asked for parallel backup
tdgbl->tr_snapshot = 0;
if (gds_trans && tdgbl->gbl_sw_par_workers > 1)
{
UCHAR in_buf[] = {fb_info_tra_snapshot_number, isc_info_end};
UCHAR out_buf[16] = {0};
gds_trans->getInfo(fbStatus, sizeof(in_buf), in_buf, sizeof(out_buf), out_buf);
if (fbStatus->isEmpty())
{
UCHAR* p = out_buf, *e = out_buf + sizeof(out_buf);
while (p < e)
{
SSHORT len;
switch (*p++)
{
case isc_info_error:
case isc_info_end:
p = e;
break;
case fb_info_tra_snapshot_number:
len = isc_portable_integer(p, 2);
p += 2;
tdgbl->tr_snapshot = isc_portable_integer(p, len);
p += len;
break;
}
}
}
if (tdgbl->tr_snapshot == 0)
tdgbl->gbl_sw_par_workers = 1;
}
// detect if MAKE_DBKEY is supported and decide kind of read relation query
if (tdgbl->gbl_sw_par_workers > 1)
{
const char* sql = "SELECT MAKE_DBKEY(0, 0) FROM RDB$DATABASE";
IStatement* stmt = DB->prepare(fbStatus, gds_trans, 0, sql, 3, 0);
if (fbStatus->getState() & IStatus::RESULT_ERROR)
{
// BURP_print_status(false, isc_status);
tdgbl->gbl_sw_par_workers = 1;
}
if (stmt)
stmt->free(fbStatus);
}
// decide what type of database we've got
@ -349,6 +398,10 @@ int BACKUP_backup(const TEXT* dbb_file, const TEXT* file_name)
// Now go back and write all data
{
Coordinator coord(getDefaultMemoryPool());
BackupRelationTask task(tdgbl);
for (burp_rel* relation = tdgbl->relations; relation; relation = relation->rel_next)
{
put(tdgbl, (UCHAR) rec_relation_data);
@ -359,11 +412,18 @@ int BACKUP_backup(const TEXT* dbb_file, const TEXT* file_name)
{
put_index(relation);
if (!(tdgbl->gbl_sw_meta || tdgbl->skipRelation(relation->rel_name)))
put_data(relation);
{
task.SetRelation(relation);
coord.runSync(&task);
if (!task.getResult(NULL))
BURP_exit_local(FINI_ERROR, tdgbl);
}
}
put(tdgbl, (UCHAR) rec_relation_end);
}
}
// now for the new triggers in rdb$triggers
BURP_verbose(159);
@ -1039,6 +1099,7 @@ void put_array( burp_fld* field, burp_rel* relation, ISC_QUAD* blob_id)
if (!status_vector.isSuccess())
{
BurpMaster master;
BURP_print(false, 81, field->fld_name);
// msg 81 error accessing blob field %s -- continuing
BURP_print_status(false, &status_vector);
@ -1184,6 +1245,7 @@ void put_blob( burp_fld* field, ISC_QUAD& blob_id)
if (!blob.open(DB, gds_trans, blob_id))
{
BurpMaster master;
BURP_print(false, 81, field->fld_name);
// msg 81 error accessing blob field %s -- continuing
BURP_print_status(false, &status_vector);
@ -1407,7 +1469,7 @@ bool put_blr_blob( att_type attribute, ISC_QUAD& blob_id)
}
void put_data(burp_rel* relation)
void put_data(burp_rel* relation, ReadRelationReq* request)
{
/**************************************
*
@ -1420,296 +1482,75 @@ void put_data(burp_rel* relation)
*
**************************************/
BurpGlobals* tdgbl = BurpGlobals::getSpecific();
USHORT field_count = 1; // eof field
BackupRelationTask* task = BackupRelationTask::getBackupTask(tdgbl);
const ReadRelationMeta* reqMeta = request->getMeta();
HalfStaticArray<burp_fld*, 4> blobFlds;
HalfStaticArray<burp_fld*, 4> arrayFlds;
SSHORT count = 0;
burp_fld* field;
for (field = relation->rel_fields; field; field = field->fld_next)
{
if (!(field->fld_flags & FLD_computed))
{
field_count += 2;
}
}
fb_assert(field_count > 0 && field_count * 9 > 0 && field_count * 9 + 200 > 0);
// Time to generate blr to fetch data. Make sure we allocate a BLR buffer
// large enough to handle the per field overhead
UCHAR* const blr_buffer = BURP_alloc(200 + field_count * 9);
UCHAR* blr = blr_buffer;
add_byte(blr, blr_version4);
add_byte(blr, blr_begin);
add_byte(blr, blr_message);
add_byte(blr, 0); // Message number
add_word(blr, field_count); // Number of fields, counting eof
RCRD_OFFSET offset = 0;
USHORT count = 0; // This is param count.
for (field = relation->rel_fields; field; field = field->fld_next)
{
if (field->fld_flags & FLD_computed)
continue;
SSHORT alignment = 4;
FLD_LENGTH length = field->fld_length;
SSHORT dtype = field->fld_type;
count += 2;
if (field->fld_flags & FLD_array)
{
dtype = blr_blob;
length = 8;
}
switch (dtype)
{
case blr_text:
alignment = type_alignments[dtype_text];
add_byte(blr, field->fld_type);
add_word(blr, field->fld_length);
break;
case blr_varying:
alignment = type_alignments[dtype_varying];
add_byte(blr, field->fld_type);
add_word(blr, field->fld_length);
length += sizeof(USHORT);
break;
case blr_short:
alignment = type_alignments[dtype_short];
add_byte(blr, field->fld_type);
add_byte(blr, field->fld_scale);
break;
case blr_long:
alignment = type_alignments[dtype_long];
add_byte(blr, field->fld_type);
add_byte(blr, field->fld_scale);
break;
case blr_quad:
alignment = type_alignments[dtype_quad];
add_byte(blr, field->fld_type);
add_byte(blr, field->fld_scale);
break;
case blr_int64:
alignment = type_alignments[dtype_int64];
add_byte(blr, field->fld_type);
add_byte(blr, field->fld_scale);
break;
case blr_int128:
alignment = type_alignments[dtype_int128];
add_byte(blr, field->fld_type);
add_byte(blr, field->fld_scale);
break;
case blr_double:
alignment = type_alignments[dtype_double];
add_byte(blr, field->fld_type);
break;
case blr_timestamp:
alignment = type_alignments[dtype_timestamp];
add_byte(blr, field->fld_type);
break;
case blr_timestamp_tz:
alignment = type_alignments[dtype_timestamp_tz];
add_byte(blr, field->fld_type);
break;
case blr_sql_time:
alignment = type_alignments[dtype_sql_time];
add_byte(blr, field->fld_type);
break;
case blr_sql_time_tz:
alignment = type_alignments[dtype_sql_time_tz];
add_byte(blr, field->fld_type);
break;
case blr_sql_date:
alignment = type_alignments[dtype_sql_date];
add_byte(blr, field->fld_type);
break;
case blr_float:
alignment = type_alignments[dtype_real];
add_byte(blr, field->fld_type);
break;
case blr_blob:
alignment = type_alignments[dtype_blob];
add_byte(blr, blr_quad);
add_byte(blr, 0);
break;
case blr_bool:
alignment = type_alignments[dtype_boolean];
add_byte(blr, field->fld_type);
break;
case blr_dec64:
case blr_dec128:
alignment = type_alignments[dtype];
add_byte(blr, field->fld_type);
break;
default:
BURP_error_redirect(NULL, 26, SafeArg() << field->fld_type);
// msg 26 datatype %ld not understood
break;
}
if (alignment)
offset = FB_ALIGN(offset, alignment);
field->fld_offset = offset;
field->fld_parameter = count++;
offset += length;
arrayFlds.add(field);
else if (field->fld_type == blr_blob)
blobFlds.add(field);
}
count++; // eof
// Next, build fields for null flags
for (field = relation->rel_fields; field; field = field->fld_next)
FbLocalStatus status;
request->start(&status, gds_trans);
if (status->getState() & IStatus::STATE_ERRORS)
{
if (field->fld_flags & FLD_computed)
continue;
add_byte(blr, blr_short);
add_byte(blr, 0);
offset = FB_ALIGN(offset, sizeof(SSHORT));
field->fld_missing_parameter = count++;
offset += sizeof(SSHORT);
}
// Finally, make up an EOF field
add_byte(blr, blr_short); // eof field
add_byte(blr, 0); // scale for eof field
SSHORT eof_parameter = count++;
RCRD_OFFSET record_length = offset;
RCRD_OFFSET eof_offset = FB_ALIGN(offset, sizeof(SSHORT));
// To be used later for the buffer size to receive data
const RCRD_LENGTH length = (RCRD_LENGTH) (eof_offset + sizeof(SSHORT));
// Build FOR loop, body, and eof handler
add_byte(blr, blr_for);
add_byte(blr, blr_rse);
add_byte(blr, 1); // count of relations
add_byte(blr, blr_rid);
add_word(blr, relation->rel_id);
add_byte(blr, 0); // context variable
add_byte(blr, blr_end);
add_byte(blr, blr_send);
add_byte(blr, 0);
add_byte(blr, blr_begin);
add_byte(blr, blr_assignment);
add_byte(blr, blr_literal);
add_byte(blr, blr_short);
add_byte(blr, 0);
add_word(blr, 1);
add_byte(blr, blr_parameter);
add_byte(blr, 0);
add_word(blr, eof_parameter);
for (field = relation->rel_fields; field; field = field->fld_next)
{
if (field->fld_flags & FLD_computed)
continue;
add_byte(blr, blr_assignment);
add_byte(blr, blr_fid);
add_byte(blr, 0);
add_word(blr, field->fld_id);
add_byte(blr, blr_parameter2);
add_byte(blr, 0);
add_word(blr, field->fld_parameter);
add_word(blr, field->fld_missing_parameter);
}
add_byte(blr, blr_end);
add_byte(blr, blr_send);
add_byte(blr, 0);
add_byte(blr, blr_assignment);
add_byte(blr, blr_literal);
add_byte(blr, blr_short);
add_byte(blr, 0);
add_word(blr, 0);
add_byte(blr, blr_parameter);
add_byte(blr, 0);
add_word(blr, eof_parameter);
add_byte(blr, blr_end);
add_byte(blr, blr_eoc);
unsigned blr_length = blr - blr_buffer;
#ifdef DEBUG
if (debug_on)
fb_print_blr(blr_buffer, blr_length, NULL, NULL, 0);
#endif
// Compile request
FbLocalStatus status_vector;
Firebird::IRequest* request = DB->compileRequest(&status_vector, blr_length, blr_buffer);
if (!status_vector.isSuccess())
{
BURP_error_redirect(&status_vector, 27);
// msg 27 isc_compile_request failed
fb_print_blr(blr_buffer, blr_length, NULL, NULL, 0);
}
BURP_free(blr_buffer);
BURP_verbose(142, relation->rel_name);
// msg 142 writing data for relation %s
request->start(&status_vector, gds_trans, 0);
if (!status_vector.isSuccess())
{
BURP_error_redirect(&status_vector, 28);
BURP_error_redirect(&status, 28);
// msg 28 isc_start_request failed
}
// Here is the crux of the problem -- writing data. All this work
// for the following small loop.
UCHAR* buffer = BURP_alloc(length);
SSHORT* eof = (SSHORT *) (buffer + eof_offset);
const UCHAR* buffer = request->getData();
// the XDR representation may be even fluffier
lstring xdr_buffer;
if (tdgbl->gbl_sw_transportable)
{
xdr_buffer.lstr_length = xdr_buffer.lstr_allocated = length + count * 3;
xdr_buffer.lstr_length = xdr_buffer.lstr_allocated = reqMeta->m_outMsgLen + count * 3;
xdr_buffer.lstr_address = BURP_alloc(xdr_buffer.lstr_length);
}
else
xdr_buffer.lstr_address = NULL;
RCRD_OFFSET record_length = reqMeta->m_outRecLen;
FB_UINT64 records = 0;
while (true)
{
request->receive(&status_vector, 0, 0, length, buffer);
if (!status_vector.isSuccess())
if (task->isStopped())
break;
request->receive(&status);
if (status->getState() & IStatus::STATE_ERRORS)
{
BURP_error_redirect(&status_vector, 29);
BURP_error_redirect(&status, 29);
// msg 29 isc_receive failed
}
if (!*eof)
if (!request->eof())
break;
records++;
// Verbose records
if ((records % tdgbl->verboseInterval) == 0)
BURP_verbose(108, SafeArg() << records);
put(tdgbl, (UCHAR) rec_data);
put_int32(att_data_length, record_length);
const UCHAR* p;
if (tdgbl->gbl_sw_transportable)
{
record_length = CAN_encode_decode(relation, &xdr_buffer, buffer, true);
record_length = CAN_encode_decode(relation, &xdr_buffer, const_cast<UCHAR*>(buffer), TRUE);
put_int32(att_xdr_length, record_length);
p = xdr_buffer.lstr_address;
}
@ -1723,38 +1564,31 @@ void put_data(burp_rel* relation)
// Look for any blobs to write
for (field = relation->rel_fields; field; field = field->fld_next)
for (burp_fld** pField = blobFlds.begin(); pField < blobFlds.end(); pField++)
{
if (field->fld_type == blr_blob &&
!(field->fld_flags & FLD_computed) && !(field->fld_flags & FLD_array))
{
put_blob(field, *(ISC_QUAD*) (buffer + field->fld_offset));
}
if (task->isStopped())
break;
field = *pField;
put_blob(field, *(ISC_QUAD*) (buffer + field->fld_offset));
}
// Look for any array to write
// we got back the blob_id for the array from isc_receive in the second param.
for (field = relation->rel_fields; field; field = field->fld_next)
for (burp_fld** pField = arrayFlds.begin(); pField < arrayFlds.end(); pField++)
{
if (field->fld_flags & FLD_array)
{
put_array(field, relation, (ISC_QUAD*) (buffer + field->fld_offset));
}
}
}
if (task->isStopped())
break;
BURP_free(buffer);
field = *pField;
put_array(field, relation, (ISC_QUAD*)(buffer + field->fld_offset));
}
BackupRelationTask::recordAdded(tdgbl);
}
if (xdr_buffer.lstr_address)
BURP_free(xdr_buffer.lstr_address);
BURP_verbose(108, SafeArg() << records);
// msg 108 %ld records written
request->free(&status_vector);
if (!status_vector.isSuccess())
BURP_error_redirect(&status_vector, 30);
// msg 30 isc_release_request failed
}
@ -2165,7 +1999,19 @@ void put_relation( burp_rel* relation)
END_ERROR;
}
}
put(tdgbl, (UCHAR) rec_relation_end);
else if (!tdgbl->gbl_sw_meta)
{
FOR(REQUEST_HANDLE tdgbl->handles_put_relation_req_handle3)
FIRST 1 P IN RDB$PAGES WITH P.RDB$RELATION_ID EQ relation->rel_id
AND P.RDB$PAGE_TYPE = pag_pointer
SORTED BY DESCENDING P.RDB$PAGE_SEQUENCE
relation->rel_max_pp = P.RDB$PAGE_SEQUENCE;
END_FOR;
ON_ERROR
general_on_error();
END_ERROR;
}
put(tdgbl, (UCHAR)rec_relation_end);
}
@ -4534,3 +4380,443 @@ void write_user_privileges()
} // namespace
namespace Burp {
/// class ReadRelationMeta
void ReadRelationMeta::setRelation(const burp_rel* relation, bool partition)
{
	m_relation = relation;

	// Build request BLR. There could be two kind of requests :
	// a) partition == true
	//    SELECT * FROM relation
	//    WHERE dbkey >= MAKE_DBKEY(rel_id, 0, 0, :loPP)
	//      AND dbkey <  MAKE_DBKEY(rel_id, 0, 0, :hiPP)
	// b) partition == false
	//    SELECT * FROM relation
	// Note, computed fields are not included into results

	// CVC: A signed short isn't enough if the engine allows near 32K fields,
	// each being char(1) ASCII in the worst case. Looking at BLR generation
	// below, it's clear an extreme case won't compile => blr_length >= 32K.
	// However, SSHORT is the limit for request_length in isc_compile_request.

	// Count output message slots: two per non-computed field (the value and
	// its null flag), plus one for the EOF marker.
	m_fldCount = 1;
	burp_fld* field;
	for (field = relation->rel_fields; field; field = field->fld_next)
	{
		if (!(field->fld_flags & FLD_computed))
		{
			m_fldCount += 2;
		}
	}

	fb_assert(m_fldCount > 0 && m_fldCount * 9 > 0 && m_fldCount * 9 + 200 > 0);

	// Time to generate blr to fetch data. Make sure we allocate a BLR buffer
	// large enough to handle the per field overhead
	UCHAR* const blr_buffer = m_blr.getBuffer(200 + m_fldCount * 9);
	UCHAR* blr = blr_buffer;

	add_byte(blr, blr_version4);
	add_byte(blr, blr_begin);

	// Input message: present only for partitioned reads, it carries the
	// [loPP, hiPP) pointer page range.
	m_inMgsNum = m_outMgsNum = 0;
	if (partition)
	{
		add_byte(blr, blr_message);
		add_byte(blr, m_inMgsNum);		// Message number
		add_word(blr, 2);				// Number of parameters: loPP and hiPP
		add_byte(blr, blr_long);		// loPP
		add_byte(blr, 0);
		add_byte(blr, blr_long);		// hiPP
		add_byte(blr, 0);

		m_outMgsNum = m_inMgsNum + 1;
	}

	// Output message: one descriptor per selected field, their null flags,
	// and the trailing EOF field.
	add_byte(blr, blr_message);
	add_byte(blr, m_outMgsNum);		// Message number
	add_word(blr, m_fldCount);		// Number of fields, counting eof

	// While emitting the field descriptors, also compute each field's byte
	// offset inside the output record (honoring datatype alignment) and its
	// parameter number.
	RCRD_OFFSET offset = 0;
	SSHORT count = 0;	// This is param count.
	for (field = relation->rel_fields; field; field = field->fld_next)
	{
		if (field->fld_flags & FLD_computed)
			continue;

		SSHORT alignment = 4;
		FLD_LENGTH length = field->fld_length;
		SSHORT dtype = field->fld_type;

		// Arrays are shipped as their blob id (8 bytes), not inline.
		if (field->fld_flags & FLD_array)
		{
			dtype = blr_blob;
			length = 8;
		}

		switch (dtype)
		{
		case blr_text:
			alignment = type_alignments[dtype_text];
			add_byte(blr, field->fld_type);
			add_word(blr, field->fld_length);
			break;

		case blr_varying:
			alignment = type_alignments[dtype_varying];
			add_byte(blr, field->fld_type);
			add_word(blr, field->fld_length);
			length += sizeof(USHORT);	// leading length word of the varying
			break;

		case blr_short:
			alignment = type_alignments[dtype_short];
			add_byte(blr, field->fld_type);
			add_byte(blr, field->fld_scale);
			break;

		case blr_long:
			alignment = type_alignments[dtype_long];
			add_byte(blr, field->fld_type);
			add_byte(blr, field->fld_scale);
			break;

		case blr_quad:
			alignment = type_alignments[dtype_quad];
			add_byte(blr, field->fld_type);
			add_byte(blr, field->fld_scale);
			break;

		case blr_int64:
			alignment = type_alignments[dtype_int64];
			add_byte(blr, field->fld_type);
			add_byte(blr, field->fld_scale);
			break;

		case blr_int128:
			alignment = type_alignments[dtype_int128];
			add_byte(blr, field->fld_type);
			add_byte(blr, field->fld_scale);
			break;

		case blr_double:
			alignment = type_alignments[dtype_double];
			add_byte(blr, field->fld_type);
			break;

		case blr_timestamp:
			alignment = type_alignments[dtype_timestamp];
			add_byte(blr, field->fld_type);
			break;

		case blr_timestamp_tz:
			alignment = type_alignments[dtype_timestamp_tz];
			add_byte(blr, field->fld_type);
			break;

		case blr_sql_time:
			alignment = type_alignments[dtype_sql_time];
			add_byte(blr, field->fld_type);
			break;

		case blr_sql_time_tz:
			alignment = type_alignments[dtype_sql_time_tz];
			add_byte(blr, field->fld_type);
			break;

		case blr_sql_date:
			alignment = type_alignments[dtype_sql_date];
			add_byte(blr, field->fld_type);
			break;

		case blr_float:
			alignment = type_alignments[dtype_real];
			add_byte(blr, field->fld_type);
			break;

		case blr_blob:
			// Blob (and array) ids travel as quads.
			alignment = type_alignments[dtype_blob];
			add_byte(blr, blr_quad);
			add_byte(blr, 0);
			break;

		case blr_bool:
			alignment = type_alignments[dtype_boolean];
			add_byte(blr, field->fld_type);
			break;

		case blr_dec64:
		case blr_dec128:
			alignment = type_alignments[dtype];
			add_byte(blr, field->fld_type);
			break;

		default:
			BURP_error_redirect(NULL, 26, SafeArg() << field->fld_type);
			// msg 26 datatype %ld not understood
			break;
		}

		if (alignment)
			offset = FB_ALIGN(offset, alignment);

		field->fld_offset = offset;
		field->fld_parameter = count++;
		offset += length;
	}

	// Next, build fields for null flags
	for (field = relation->rel_fields; field; field = field->fld_next)
	{
		if (field->fld_flags & FLD_computed)
			continue;

		add_byte(blr, blr_short);
		add_byte(blr, 0);
		offset = FB_ALIGN(offset, sizeof(SSHORT));
		field->fld_missing_parameter = count++;
		offset += sizeof(SSHORT);
	}

	// Finally, make up an EOF field
	add_byte(blr, blr_short);	// eof field
	add_byte(blr, 0);			// scale for eof field
	const SSHORT eof_parameter = count++;

	m_outRecLen = offset;
	m_outEofOffset = FB_ALIGN(offset, sizeof(SSHORT));

	// To be used later for the buffer size to receive data
	m_outMsgLen = (USHORT) (m_outEofOffset + sizeof(SSHORT));

	// Partitioned requests must receive the PP range before the FOR loop runs.
	if (partition)
	{
		add_byte(blr, blr_receive);
		add_byte(blr, m_inMgsNum);
	}

	// Build FOR loop, body, and eof handler
	add_byte(blr, blr_for);
	add_byte(blr, blr_rse);
	add_byte(blr, 1);				// count of relations
	add_byte(blr, blr_rid);
	add_word(blr, relation->rel_id);
	add_byte(blr, 0);				// context variable

	if (partition)
	{
		// add boolean condition:
		// dbkey >= MAKE_DBKEY(relId, 0, 0, loPP) AND
		// dbkey <  MAKE_DBKEY(relId, 0, 0, hiPP)
		add_byte(blr, blr_boolean);
		add_byte(blr, blr_and);

		add_byte(blr, blr_geq);
		add_byte(blr, blr_dbkey);
		add_byte(blr, 0);
		add_byte(blr, blr_sys_function);
		add_string(blr, "MAKE_DBKEY");
		add_byte(blr, 4);			// argument count

		add_byte(blr, blr_literal);	// relID
		add_byte(blr, blr_long);
		add_byte(blr, 0);
		add_long(blr, relation->rel_id);

		add_byte(blr, blr_literal);	// recNo
		add_byte(blr, blr_long);
		add_byte(blr, 0);
		add_long(blr, 0);

		add_byte(blr, blr_literal);	// DP
		add_byte(blr, blr_long);
		add_byte(blr, 0);
		add_long(blr, 0);

		add_byte(blr, blr_parameter);	// PP (loPP, input param 0)
		add_byte(blr, m_inMgsNum);
		add_word(blr, 0);

		add_byte(blr, blr_lss);
		add_byte(blr, blr_dbkey);
		add_byte(blr, 0);
		add_byte(blr, blr_sys_function);
		add_string(blr, "MAKE_DBKEY");
		add_byte(blr, 4);			// argument count

		add_byte(blr, blr_literal);	// relID
		add_byte(blr, blr_long);
		add_byte(blr, 0);
		add_long(blr, relation->rel_id);

		add_byte(blr, blr_literal);	// recNo
		add_byte(blr, blr_long);
		add_byte(blr, 0);
		add_long(blr, 0);

		add_byte(blr, blr_literal);	// DP
		add_byte(blr, blr_long);
		add_byte(blr, 0);
		add_long(blr, 0);

		add_byte(blr, blr_parameter);	// PP (hiPP, input param 1)
		add_byte(blr, m_inMgsNum);
		add_word(blr, 1);
	}

	// rse end
	add_byte(blr, blr_end);

	// Loop body: send eof = 1 and assign every field (with its null flag)
	// into the output message.
	add_byte(blr, blr_send);
	add_byte(blr, m_outMgsNum);
	add_byte(blr, blr_begin);
	add_byte(blr, blr_assignment);
	add_byte(blr, blr_literal);
	add_byte(blr, blr_short);
	add_byte(blr, 0);
	add_word(blr, 1);
	add_byte(blr, blr_parameter);
	add_byte(blr, m_outMgsNum);
	add_word(blr, eof_parameter);

	for (field = relation->rel_fields; field; field = field->fld_next)
	{
		if (field->fld_flags & FLD_computed)
			continue;

		add_byte(blr, blr_assignment);
		add_byte(blr, blr_fid);
		add_byte(blr, 0);				// context
		add_word(blr, field->fld_id);
		add_byte(blr, blr_parameter2);
		add_byte(blr, m_outMgsNum);		// message number
		add_word(blr, field->fld_parameter);
		add_word(blr, field->fld_missing_parameter);
	}

	add_byte(blr, blr_end);

	// EOF handler: send eof = 0 once the FOR loop is exhausted.
	add_byte(blr, blr_send);
	add_byte(blr, m_outMgsNum);
	add_byte(blr, blr_assignment);
	add_byte(blr, blr_literal);
	add_byte(blr, blr_short);
	add_byte(blr, 0);
	add_word(blr, 0);
	add_byte(blr, blr_parameter);
	add_byte(blr, m_outMgsNum);
	add_word(blr, eof_parameter);

	add_byte(blr, blr_end);
	add_byte(blr, blr_eoc);

	const FB_SIZE_T blr_length = blr - blr_buffer;
	m_blr.shrink(blr_length);

#ifdef DEBUG
	if (debug_on)
		fb_print_blr(blr_buffer, blr_length, NULL, NULL, 0);
#endif
}
void ReadRelationMeta::clear()
{
	// Drop the generated BLR and forget the relation: the metadata must be
	// rebuilt with setRelation() before it can be used again.
	m_blr.clear();
	m_relation = NULL;

	// Reset the cached layout numbers.
	m_fldCount = 0;
	m_inMgsNum = 0;
	m_outMgsNum = 0;
	m_outMsgLen = 0;
	m_outRecLen = 0;
	m_outEofOffset = 0;
}
/// class ReadRelationReq
void ReadRelationReq::reset(const ReadRelationMeta* meta)
{
	// Bind this request object to the given relation metadata, compiling the
	// BLR request when the metadata actually changes. Passing the same
	// metadata for the same relation again is a cheap no-op: the already
	// compiled request is reused.

	if (m_meta == meta && meta != NULL && m_relation == meta->m_relation)
		return;

	if (m_meta)
		clear();

	m_meta = meta;
	if (m_meta)
	{
		m_relation = m_meta->m_relation;

		// NOTE(review): tdgbl looks unused, but the DB macro below presumably
		// expands to a member of it — confirm before removing.
		BurpGlobals* tdgbl = BurpGlobals::getSpecific();

		FbLocalStatus status;
		compile(&status, DB);
		if (status->getState() & IStatus::STATE_ERRORS)
		{
			BURP_error_redirect(&status, 27);
			// msg 27 isc_compile_request failed
			fb_print_blr(m_meta->m_blr.begin(), m_meta->m_blr.getCount(), NULL, NULL, 0);
		}

		// Allocate the output message buffer; the EOF flag lives at a fixed
		// offset inside it.
		UCHAR* data = m_outMsg.getBuffer(m_meta->m_outMsgLen);
		m_eof = reinterpret_cast<SSHORT*> (data + m_meta->m_outEofOffset);
	}
	else
	{
		m_relation = NULL;
		m_request = nullptr;	// use nullptr, consistent with release()
		m_eof = NULL;
	}

	memset(&m_inMgs, 0, sizeof(m_inMgs));
}
void ReadRelationReq::clear()
{
	// Detach from the current metadata and free the compiled request.
	// Note: release() below calls clear() again on success, but by then
	// m_request is already null, so the recursion terminates immediately.
	m_relation = NULL;
	m_meta = NULL;
	m_eof = NULL;
	m_outMsg.clear();

	if (m_request)
	{
		FbLocalStatus status;
		release(&status);
	}
}
void ReadRelationReq::compile(CheckStatusWrapper* status, IAttachment* att)
{
	// Compile the BLR prepared by ReadRelationMeta against the attachment.
	const auto& blr = m_meta->m_blr;
	m_request = att->compileRequest(status, blr.getCount(), blr.begin());
}
void ReadRelationReq::release(CheckStatusWrapper* status)
{
	// Free the compiled request handle, if any. On success the handle is
	// cleared first and then clear() resets the rest of the state (clear()
	// does not recurse back here because m_request is already null).
	// On failure m_request is left untouched so the caller can inspect status.
	if (m_request)
	{
		m_request->free(status);
		if (!(status->getState() & IStatus::STATE_ERRORS))
		{
			m_request = nullptr;
			clear();
		}
	}
}
void ReadRelationReq::setParams(ULONG loPP, ULONG hiPP)
{
	// Store the [loPP, hiPP) pointer-page range to be sent with the
	// input message when the request is started.
	m_inMgs.hiPP = hiPP;
	m_inMgs.loPP = loPP;
}
void ReadRelationReq::start(CheckStatusWrapper* status, ITransaction* tran)
{
	// Requests without an input message (full table scan) are simply started.
	if (!m_meta->haveInputs())
	{
		m_request->start(status, tran, 0);
		return;
	}

	// Partitioned requests expect the loPP/hiPP input message up front.
	m_request->startAndSend(status, tran, 0, m_meta->m_inMgsNum, sizeof(m_inMgs), &m_inMgs);
}
void ReadRelationReq::receive(CheckStatusWrapper* status)
{
	// Fetch the next output message (record data plus the trailing EOF flag)
	// into the buffer allocated by reset().
	const ReadRelationMeta* meta = m_meta;
	m_request->receive(status, 0, meta->m_outMgsNum, meta->m_outMsgLen, m_outMsg.begin());
}
/// class BackupRelationTask
bool BackupRelationTask::tableReader(Item& item)
{
	// Worker entry point: back up one pointer-page partition of the relation
	// assigned to this item, then return its buffer to the task.
	auto& request = item.m_request;
	request.reset(&m_metadata);
	request.setParams(item.m_ppSequence, item.m_ppSequence + 1);

	put_data(item.m_relation, &request);

	item.getBackupTask()->releaseBuffer(item);
	return true;
}
} // namespace Burp

View File

@ -61,6 +61,7 @@
#include "../common/os/os_utils.h"
#include "../burp/burpswi.h"
#include "../common/db_alias.h"
#include "../burp/BurpTasks.h"
#ifdef HAVE_CTYPE_H
#include <ctype.h>
@ -83,8 +84,9 @@
#include <sys/file.h>
#endif
using namespace Firebird;
using MsgFormat::SafeArg;
using Firebird::FbLocalStatus;
using namespace Burp;
const char* fopen_write_type = "w";
const char* fopen_read_type = "r";
@ -596,6 +598,7 @@ int gbak(Firebird::UtilSvc* uSvc)
tdgbl->gbl_sw_old_descriptions = false;
tdgbl->gbl_sw_mode = false;
tdgbl->gbl_sw_skip_count = 0;
tdgbl->gbl_sw_par_workers = 1;
tdgbl->action = NULL;
burp_fil* file = NULL;
@ -868,6 +871,19 @@ int gbak(Firebird::UtilSvc* uSvc)
// skip a service specification
in_sw_tab->in_sw_state = false;
break;
case IN_SW_BURP_PARALLEL_WORKERS:
if (++itr >= argc)
{
BURP_error(407, true);
// msg 407 parallel workers parameter missing
}
tdgbl->gbl_sw_par_workers = get_number(argv[itr]);
if (tdgbl->gbl_sw_par_workers <= 0)
{
BURP_error(408, true, argv[itr]);
// msg 408 expected parallel workers, encountered "%s"
}
break;
case IN_SW_BURP_Y:
{
// want to do output redirect handling now instead of waiting
@ -965,6 +981,11 @@ int gbak(Firebird::UtilSvc* uSvc)
BURP_error(334, true, SafeArg() << in_sw_tab->in_sw_name);
tdgbl->gbl_sw_convert_ext_tables = true;
break;
case IN_SW_BURP_DIRECT_IO:
if (tdgbl->gbl_sw_direct_io)
BURP_error(334, true, SafeArg() << in_sw_tab->in_sw_name);
tdgbl->gbl_sw_direct_io = true;
break;
case IN_SW_BURP_E:
if (!tdgbl->gbl_sw_compress)
BURP_error(334, true, SafeArg() << in_sw_tab->in_sw_name);
@ -1380,6 +1401,7 @@ int gbak(Firebird::UtilSvc* uSvc)
MVOL_init(tdgbl->io_buffer_size);
int result;
tdgbl->gbl_dpb_data.add(dpb.getBuffer(), dpb.getBufferLength());
tdgbl->uSvc->started();
switch (action)
@ -1490,7 +1512,9 @@ void BURP_abort()
* Abandon a failed operation.
*
**************************************/
BurpGlobals* tdgbl = BurpGlobals::getSpecific();
BurpMaster master;
BurpGlobals* tdgbl = master.get();
USHORT code = tdgbl->action && tdgbl->action->act_action == ACT_backup_fini ? 351 : 83;
// msg 351 Error closing database, but backup file is OK
// msg 83 Exiting before completion due to errors
@ -1517,7 +1541,8 @@ void BURP_error(USHORT errcode, bool abort, const SafeArg& arg)
* Functional description
*
**************************************/
BurpGlobals* tdgbl = BurpGlobals::getSpecific();
BurpMaster master;
BurpGlobals* tdgbl = master.get();
tdgbl->uSvc->setServiceStatus(burp_msg_fac, errcode, arg);
tdgbl->uSvc->started();
@ -1564,6 +1589,7 @@ void BURP_error_redirect(Firebird::IStatus* status_vector, USHORT errcode, const
* Issue error message. Output messages then abort.
*
**************************************/
BurpMaster master;
BURP_print_status(true, status_vector);
BURP_error(errcode, true, arg);
@ -1672,6 +1698,7 @@ void BURP_print(bool err, USHORT number, const SafeArg& arg)
* will accept.
*
**************************************/
BurpMaster master;
BURP_msg_partial(err, 169); // msg 169: gbak:
BURP_msg_put(err, number, arg);
@ -1692,6 +1719,7 @@ void BURP_print(bool err, USHORT number, const char* str)
* will accept.
*
**************************************/
BurpMaster master;
static const SafeArg dummy;
BURP_msg_partial(err, 169, dummy); // msg 169: gbak:
@ -1714,11 +1742,13 @@ void BURP_print_status(bool err, Firebird::IStatus* status_vector)
**************************************/
if (status_vector)
{
BurpMaster master;
BurpGlobals* tdgbl = master.get();
const ISC_STATUS* vector = status_vector->getErrors();
if (err)
{
BurpGlobals* tdgbl = BurpGlobals::getSpecific();
tdgbl->uSvc->setServiceStatus(vector);
tdgbl->uSvc->started();
@ -1759,6 +1789,9 @@ void BURP_print_warning(Firebird::IStatus* status)
**************************************/
if (status && (status->getState() & Firebird::IStatus::STATE_WARNINGS))
{
BurpMaster master;
BurpGlobals* tdgbl = master.get();
// print the warning message
const ISC_STATUS* vector = status->getWarnings();
SCHAR s[1024];
@ -1791,7 +1824,8 @@ void BURP_verbose(USHORT number, const SafeArg& arg)
* If not verbose then calls yielding function.
*
**************************************/
BurpGlobals* tdgbl = BurpGlobals::getSpecific();
BurpMaster master;
BurpGlobals* tdgbl = master.get();
if (tdgbl->gbl_sw_verbose)
BURP_message(number, arg, true);
@ -1812,7 +1846,8 @@ void BURP_message(USHORT number, const MsgFormat::SafeArg& arg, bool totals)
* Calls BURP_msg for formatting & displaying a message.
*
**************************************/
BurpGlobals* tdgbl = BurpGlobals::getSpecific();
BurpMaster master;
BurpGlobals* tdgbl = master.get();
if (totals)
tdgbl->print_stats_header();
@ -2108,6 +2143,7 @@ static gbak_action open_files(const TEXT* file1,
tdgbl->uSvc->setDataMode(true);
fil->fil_fd = GBAK_STDOUT_DESC();
tdgbl->stdIoMode = true;
tdgbl->gbl_sw_direct_io = false;
break;
}
else
@ -2116,7 +2152,8 @@ static gbak_action open_files(const TEXT* file1,
#ifdef WIN_NT
if ((fil->fil_fd = NT_tape_open(nm.c_str(), MODE_WRITE, CREATE_ALWAYS)) == INVALID_HANDLE_VALUE)
#else
if ((fil->fil_fd = os_utils::open(nm.c_str(), MODE_WRITE, open_mask)) == -1)
const int wmode = MODE_WRITE | (tdgbl->gbl_sw_direct_io ? O_DIRECT : 0);
if ((fil->fil_fd = open(fil->fil_name.c_str(), wmode, open_mask)) == -1)
#endif // WIN_NT
{
@ -2213,6 +2250,7 @@ static gbak_action open_files(const TEXT* file1,
fil->fil_fd = GBAK_STDIN_DESC();
tdgbl->file_desc = fil->fil_fd;
tdgbl->stdIoMode = true;
tdgbl->gbl_sw_direct_io = false;
tdgbl->gbl_sw_files = fil->fil_next;
}
else
@ -2224,7 +2262,8 @@ static gbak_action open_files(const TEXT* file1,
#ifdef WIN_NT
if ((fil->fil_fd = NT_tape_open(nm.c_str(), MODE_READ, OPEN_EXISTING)) == INVALID_HANDLE_VALUE)
#else
if ((fil->fil_fd = os_utils::open(nm.c_str(), MODE_READ)) == INVALID_HANDLE_VALUE)
const int rmode = MODE_READ | (tdgbl->gbl_sw_direct_io ? O_DIRECT : 0);
if ((fil->fil_fd = os_utils::open(nm.c_str(), rmode)) == INVALID_HANDLE_VALUE)
#endif
{
BURP_error(65, true, fil->fil_name.c_str());
@ -2269,7 +2308,7 @@ static gbak_action open_files(const TEXT* file1,
#ifdef WIN_NT
if ((fil->fil_fd = NT_tape_open(nm.c_str(), MODE_READ, OPEN_EXISTING)) == INVALID_HANDLE_VALUE)
#else
if ((fil->fil_fd = os_utils::open(nm.c_str(), MODE_READ)) == INVALID_HANDLE_VALUE)
if ((fil->fil_fd = os_utils::open(nm.c_str(), rmode)) == INVALID_HANDLE_VALUE)
#endif
{
BURP_error(65, false, fil->fil_name.c_str());
@ -2427,7 +2466,8 @@ static void burp_output(bool err, const SCHAR* format, ...)
**************************************/
va_list arglist;
BurpGlobals* tdgbl = BurpGlobals::getSpecific();
BurpMaster master;
BurpGlobals* tdgbl = master.get();
if (tdgbl->sw_redirect != NOOUTPUT && format[0] != '\0')
{
@ -2709,7 +2749,10 @@ bool BurpGlobals::skipRelation(const char* name)
{ false, false, true} // NM p
};
return result[checkPattern(skipDataMatcher, name)][checkPattern(includeDataMatcher, name)];
const enum Pattern res1 = checkPattern(skipDataMatcher, name);
const enum Pattern res2 = checkPattern(includeDataMatcher, name);
return result[res1][res2];
}
void BurpGlobals::read_stats(SINT64* stats)

View File

@ -745,6 +745,7 @@ struct burp_rel
SSHORT rel_name_length;
GDS_NAME rel_name;
GDS_NAME rel_owner; // relation owner, if not us
ULONG rel_max_pp; // max pointer page sequence number
};
enum burp_rel_flags_vals {
@ -954,6 +955,10 @@ public:
: ThreadData(ThreadData::tddGBL),
GblPool(us->isService()),
defaultCollations(getPool()),
gbl_dpb_data(*getDefaultMemoryPool()),
master(true),
taskItem(NULL),
gbl_sw_par_workers(1),
uSvc(us),
verboseInterval(10000),
flag_on_line(true),
@ -1001,6 +1006,7 @@ public:
bool gbl_sw_mode;
bool gbl_sw_mode_val;
bool gbl_sw_overwrite;
bool gbl_sw_direct_io;
bool gbl_sw_zip;
const SCHAR* gbl_sw_keyholder;
const SCHAR* gbl_sw_crypt;
@ -1014,6 +1020,7 @@ public:
SLONG gbl_sw_page_buffers;
burp_fil* gbl_sw_files;
burp_fil* gbl_sw_backup_files;
int gbl_sw_par_workers;
gfld* gbl_global_fields;
unsigned gbl_network_protocol;
burp_act* action;
@ -1068,6 +1075,7 @@ public:
FB_UINT64 mvol_cumul_count;
UCHAR* mvol_io_ptr;
int mvol_io_cnt;
UCHAR* mvol_io_memory; // as allocated, not aligned pointer
UCHAR* mvol_io_buffer;
UCHAR* mvol_io_volume;
UCHAR* mvol_io_header;
@ -1084,6 +1092,7 @@ public:
Firebird::IAttachment* db_handle;
Firebird::ITransaction* tr_handle;
Firebird::ITransaction* global_trans;
TraNumber tr_snapshot;
DESC file_desc;
int exit_code;
UCHAR* head_of_mem_list;
@ -1147,6 +1156,7 @@ public:
Firebird::IRequest* handles_put_index_req_handle7;
Firebird::IRequest* handles_put_relation_req_handle1;
Firebird::IRequest* handles_put_relation_req_handle2;
Firebird::IRequest* handles_put_relation_req_handle3;
Firebird::IRequest* handles_store_blr_gen_id_req_handle1;
Firebird::IRequest* handles_write_function_args_req_handle1;
Firebird::IRequest* handles_write_function_args_req_handle2;
@ -1181,7 +1191,10 @@ public:
Firebird::Array<Firebird::Pair<Firebird::NonPooled<Firebird::MetaString, Firebird::MetaString> > >
defaultCollations;
Firebird::Array<UCHAR> gbl_dpb_data;
Firebird::UtilSvc* uSvc;
bool master; // set for master thread only
void* taskItem; // current task item, if any
ULONG verboseInterval; // How many records should be backed up or restored before we show this message
bool flag_on_line; // indicates whether we will bring the database on-line
bool firstMap; // this is the first time we entered get_mapping()

View File

@ -99,6 +99,9 @@ const int IN_SW_BURP_CRYPT = 51; // name of crypt plugin
const int IN_SW_BURP_INCLUDE_DATA = 52; // backup data from tables
const int IN_SW_BURP_REPLICA = 53; // replica mode
const int IN_SW_BURP_PARALLEL_WORKERS = 54; // parallel workers
const int IN_SW_BURP_DIRECT_IO = 55; // direct IO for backup files
/**************************************************************************/
static const char* const BURP_SW_MODE_NONE = "NONE";
@ -121,6 +124,8 @@ static const Switches::in_sw_tab_t reference_burp_in_sw_table[] =
// msg 254: @1CO(NVERT) backup external files as tables
{IN_SW_BURP_CRYPT, isc_spb_bkp_crypt, "CRYPT", 0, 0, 0, false, false, 373, 3, NULL, boGeneral},
// msg 373:@1CRY(PT) plugin name
{IN_SW_BURP_DIRECT_IO, isc_spb_bkp_direct_io,"DIRECT_IO", 0, 0, 0, false, true, 409, 1, NULL, boGeneral},
// msg 409: @1D(IRECT_IO) direct IO for backup file(s)
{IN_SW_BURP_E, isc_spb_bkp_expand, "EXPAND", 0, 0, 0, false, true, 97, 1, NULL, boBackup},
// msg 97: @1EXPAND no data compression
{IN_SW_BURP_FA, isc_spb_bkp_factor, "FACTOR", 0, 0, 0, false, false, 181, 2, NULL, boBackup},
@ -164,6 +169,8 @@ static const Switches::in_sw_tab_t reference_burp_in_sw_table[] =
// msg 186: @1OLD_DESCRIPTIONS save old style metadata descriptions
{IN_SW_BURP_P, isc_spb_res_page_size, "PAGE_SIZE", 0, 0, 0, false, false, 101, 1, NULL, boRestore},
// msg 101: @1PAGE_SIZE override default page size
{IN_SW_BURP_PARALLEL_WORKERS, isc_spb_bkp_parallel_workers, "PARALLEL", 0, 0, 0, false, false, 406, 3, NULL, boGeneral},
// msg 406: @1PAR(ALLEL) parallel workers
{IN_SW_BURP_PASS, 0, "PASSWORD", 0, 0, 0, false, false, 190, 3, NULL, boGeneral},
// msg 190: @1PA(SSWORD) Firebird password
{IN_SW_BURP_RECREATE, 0, "RECREATE_DATABASE", 0, 0, 0, false, false, 284, 1, NULL, boMain},

View File

@ -43,6 +43,7 @@
#include "../burp/burp_proto.h"
#include "../burp/mvol_proto.h"
#include "../burp/split/spit.h"
#include "../burp/BurpTasks.h"
#include "../yvalve/gds_proto.h"
#include "../common/gdsassert.h"
#include "../common/os/os_utils.h"
@ -69,6 +70,7 @@
using MsgFormat::SafeArg;
using Firebird::FbLocalStatus;
using namespace Burp;
const int open_mask = 0666;
@ -638,7 +640,8 @@ FB_UINT64 mvol_fini_write(BurpGlobals* tdgbl, int* io_cnt, UCHAR** io_ptr)
}
tdgbl->file_desc = INVALID_HANDLE_VALUE;
BURP_free(tdgbl->mvol_io_header);
BURP_free(tdgbl->mvol_io_memory);
tdgbl->mvol_io_memory = NULL;
tdgbl->mvol_io_header = NULL;
tdgbl->mvol_io_buffer = NULL;
tdgbl->blk_io_cnt = 0;
@ -803,7 +806,9 @@ void mvol_init_write(BurpGlobals* tdgbl, const char* file_name, int* cnt, UCHAR*
tdgbl->mvol_actual_buffer_size = tdgbl->mvol_io_buffer_size;
const ULONG temp_buffer_size = tdgbl->mvol_io_buffer_size * tdgbl->gbl_sw_blk_factor;
tdgbl->mvol_io_ptr = tdgbl->mvol_io_buffer = BURP_alloc(temp_buffer_size + MAX_HEADER_SIZE);
tdgbl->mvol_io_memory = BURP_alloc(temp_buffer_size + MAX_HEADER_SIZE * 2);
tdgbl->mvol_io_ptr = tdgbl->mvol_io_buffer =
(UCHAR*) FB_ALIGN((U_IPTR) tdgbl->mvol_io_memory, MAX_HEADER_SIZE);
tdgbl->mvol_io_cnt = tdgbl->mvol_actual_buffer_size;
while (!write_header(tdgbl->file_desc, temp_buffer_size, false))
@ -830,6 +835,14 @@ void mvol_init_write(BurpGlobals* tdgbl, const char* file_name, int* cnt, UCHAR*
void MVOL_read(BurpGlobals* tdgbl)
{
// Setup our pointer
if (!tdgbl->master)
{
// hvlad: it will throw ExcReadDone exception when there is nothing to read
RestoreRelationTask::renewBuffer(tdgbl);
tdgbl->mvol_io_ptr = tdgbl->mvol_io_buffer;
return;
}
tdgbl->gbl_io_ptr = tdgbl->gbl_compress_buffer;
tdgbl->gbl_io_cnt = unzip_read_block(tdgbl, tdgbl->gbl_io_ptr, ZC_BUFSIZE);
}
@ -875,6 +888,8 @@ static void os_read(int* cnt, UCHAR** ptr)
{
BurpGlobals* tdgbl = BurpGlobals::getSpecific();
fb_assert(tdgbl->master);
for (;;)
{
tdgbl->mvol_io_cnt = read(tdgbl->file_desc, tdgbl->mvol_io_buffer, tdgbl->mvol_io_buffer_size);
@ -918,6 +933,7 @@ static void os_read(int* cnt, UCHAR** ptr)
{
BurpGlobals* tdgbl = BurpGlobals::getSpecific();
fb_assert(tdgbl->master);
fb_assert(tdgbl->blk_io_cnt <= 0);
for (;;)
@ -1025,11 +1041,13 @@ DESC NT_tape_open(const char* name, ULONG mode, ULONG create)
BurpGlobals* tdgbl = BurpGlobals::getSpecific();
const DWORD flags = tdgbl->gbl_sw_direct_io ? FILE_FLAG_NO_BUFFERING : 0;
if (strnicmp(name, "\\\\.\\tape", 8))
{
handle = CreateFile(name, mode,
mode == MODE_WRITE ? 0 : FILE_SHARE_READ,
NULL, create, FILE_ATTRIBUTE_NORMAL, NULL);
NULL, create, FILE_ATTRIBUTE_NORMAL | flags, NULL);
}
else
{
@ -1048,7 +1066,7 @@ DESC NT_tape_open(const char* name, ULONG mode, ULONG create)
//
handle = CreateFile(name, mode | MODE_READ,
mode == MODE_WRITE ? FILE_SHARE_WRITE : FILE_SHARE_READ,
0, OPEN_EXISTING, 0, NULL);
NULL, OPEN_EXISTING, flags, NULL);
if (handle != INVALID_HANDLE_VALUE)
{
// emulate UNIX rewinding the tape on open:
@ -1082,6 +1100,12 @@ DESC NT_tape_open(const char* name, ULONG mode, ULONG create)
//
void MVOL_write(BurpGlobals* tdgbl)
{
if (!tdgbl->master)
{
BackupRelationTask::renewBuffer(tdgbl);
return;
}
fb_assert(tdgbl->gbl_io_ptr >= tdgbl->gbl_compress_buffer);
fb_assert(tdgbl->gbl_io_ptr <= tdgbl->gbl_compress_buffer + ZC_BUFSIZE);
@ -1098,6 +1122,14 @@ UCHAR mvol_write(const UCHAR c, int* io_cnt, UCHAR** io_ptr)
BurpGlobals* tdgbl = BurpGlobals::getSpecific();
if (!tdgbl->master)
{
BackupRelationTask::renewBuffer(tdgbl);
*(*io_ptr)++ = c;
(*io_cnt)--;
return c;
}
const ULONG size_to_write = BURP_UP_TO_BLOCK(*io_ptr - tdgbl->mvol_io_buffer);
FB_UINT64 left = size_to_write;
@ -1317,10 +1349,17 @@ const UCHAR* MVOL_write_block(BurpGlobals* tdgbl, const UCHAR* ptr, ULONG count)
// If buffer full, write it
if (tdgbl->gbl_io_cnt <= 0)
{
zip_write_block(tdgbl, tdgbl->gbl_compress_buffer, tdgbl->gbl_io_ptr - tdgbl->gbl_compress_buffer, false);
if (!tdgbl->master)
{
BackupRelationTask::renewBuffer(tdgbl);
}
else
{
zip_write_block(tdgbl, tdgbl->gbl_compress_buffer, tdgbl->gbl_io_ptr - tdgbl->gbl_compress_buffer, false);
tdgbl->gbl_io_ptr = tdgbl->gbl_compress_buffer;
tdgbl->gbl_io_cnt = ZC_BUFSIZE;
tdgbl->gbl_io_ptr = tdgbl->gbl_compress_buffer;
tdgbl->gbl_io_cnt = ZC_BUFSIZE;
}
}
const ULONG n = MIN(count, (ULONG) tdgbl->gbl_io_cnt);
@ -1510,7 +1549,11 @@ static DESC next_volume( DESC handle, ULONG mode, bool full_buffer)
new_desc = NT_tape_open(new_file, mode, OPEN_ALWAYS);
if (new_desc == INVALID_HANDLE_VALUE)
#else
new_desc = os_utils::open(new_file, mode, open_mask);
ULONG mode2 = mode;
if (mode == MODE_WRITE && tdgbl->gbl_sw_direct_io)
mode2 |= O_DIRECT;
new_desc = open(new_file, mode2, open_mask);
if (new_desc < 0)
#endif // WIN_NT
{
@ -2001,7 +2044,8 @@ static bool write_header(DESC handle, ULONG backup_buffer_size, bool full_buffer
put(tdgbl, att_end);
tdgbl->mvol_io_data = tdgbl->mvol_io_ptr;
tdgbl->mvol_io_data = (UCHAR*) FB_ALIGN((U_IPTR) tdgbl->mvol_io_ptr, MAX_HEADER_SIZE);
fb_assert(tdgbl->mvol_io_data == tdgbl->mvol_io_header + MAX_HEADER_SIZE);
}
else
{

File diff suppressed because it is too large Load Diff

332
src/common/Task.cpp Normal file
View File

@ -0,0 +1,332 @@
/*
* PROGRAM: Firebird Database Engine
* MODULE: Task.cpp
* DESCRIPTION: Parallel task execution support
*
* The contents of this file are subject to the Initial
* Developer's Public License Version 1.0 (the "License");
* you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
* http://www.ibphoenix.com/main.nfs?a=ibphoenix&page=ibp_idpl.
*
* Software distributed under the License is distributed AS IS,
* WITHOUT WARRANTY OF ANY KIND, either express or implied.
* See the License for the specific language governing rights
* and limitations under the License.
*
* The Original Code was created by Khorsun Vladyslav
* for the Firebird Open Source RDBMS project.
*
* Copyright (c) 2019 Khorsun Vladyslav <hvlad@users.sourceforge.net>
* and all contributors signed below.
*
* All Rights Reserved.
* Contributor(s): ______________________________________.
*
*/
#include "../common/Task.h"
namespace Firebird {
/// class WorkerThread
// Static OS-thread entry point: recover the WorkerThread instance from the
// opaque argument, run its main loop and forward the exit code.
THREAD_ENTRY_DECLARE WorkerThread::workerThreadRoutine(THREAD_ENTRY_PARAM arg)
{
	WorkerThread* const self = static_cast<WorkerThread*>(arg);
	const int exitCode = self->threadRoutine();
	return (THREAD_ENTRY_RETURN)(IPTR) exitCode;
}
// Allocate a WorkerThread bound to the coordinator and launch its OS thread.
// AutoPtr guards against a leak should Thread::start() throw; on success the
// ownership is handed back to the caller.
WorkerThread* WorkerThread::start(Coordinator* coordinator)
{
	AutoPtr<WorkerThread> newThread(FB_NEW WorkerThread(coordinator));
	Thread::start(workerThreadRoutine, newThread, THREAD_medium, &newThread->m_thdHandle);
	return newThread.release();
}
// Thread main loop: report readiness, then sleep on m_waitSem until
// runWorker() posts a worker or shutdown() requests termination.
int WorkerThread::threadRoutine()
{
	// Report that the thread is up; start()/waitForState(IDLE) waits on this.
	m_state = IDLE;
	m_signalSem.release();

	while(m_state != STOPPING)
	{
		// Sleep until runWorker() or shutdown() releases the semaphore.
		m_waitSem.enter();

		if (m_state == RUNNING && m_worker != NULL)
		{
			// Serve the assigned worker until it runs out of work items.
			m_worker->work(this);
			m_worker = NULL;
		}

		if (m_state == RUNNING)
		{
			// Back to idle; wake whoever blocks in waitForState().
			m_state = IDLE;
			m_signalSem.release();
		}

		if (m_state == STOPPING)
			break;
	}

	return 0;
}
// Hand a worker over to this (currently idle) thread and wake it up.
// Caller must guarantee the thread is idle and has no pending worker.
void WorkerThread::runWorker(Worker* worker)
{
	fb_assert(!m_worker);
	fb_assert(m_state == IDLE);

	m_worker = worker;
	m_state = RUNNING;

	// Wake threadRoutine() which sleeps on this semaphore.
	m_waitSem.release();
}
// Wait until the thread reaches the given state.
// timeout < 0: block (possibly repeatedly) until the state is reached.
// timeout >= 0: make a single bounded wait on the state-change semaphore
// (passed as tryEnter(0, timeout) - presumably milliseconds; verify against
// the Semaphore API), then re-check once.
// Returns true if the thread is in the requested state on exit.
bool WorkerThread::waitForState(STATE state, int timeout)
{
	while (m_state != state)	// || m_state == old_state - consume old signals ?
	{
		if (timeout >= 0)
		{
			// One bounded attempt only, then fall through to the final check.
			m_signalSem.tryEnter(0, timeout);
			break;
		}
		else
			m_signalSem.enter();
	}

	return (m_state == state);
}
// Request thread termination. With wait == true the call joins the OS thread
// and marks the object SHUTDOWN; with wait == false it only posts the request
// (the caller is expected to join later, see Coordinator::~Coordinator).
void WorkerThread::shutdown(bool wait)
{
	if (m_state == SHUTDOWN)
		return;

	m_state = STOPPING;
	m_waitSem.release();	// wake threadRoutine() so it notices STOPPING

	if (wait)
	{
		Thread::waitForCompletion(m_thdHandle);
		m_state = SHUTDOWN;
	}
}
/// class Worker
// Pull work items from the bound task and handle them until the task runs
// out of items, the handler reports failure, or the hosting thread leaves
// the RUNNING state. thd is NULL when called synchronously on the caller's
// own thread (see Coordinator::runSync).
bool Worker::work(WorkerThread* thd)
{
	fb_assert(m_state == READY);

	m_state = WORKING;
	m_thread = thd;

	Task::WorkItem* workItem = NULL;
	while (true)
	{
		// Stop promptly if our thread was asked to stop.
		if (m_thread && m_thread->getState() != WorkerThread::RUNNING)
			break;

		// No more work items - we are done.
		if (!m_task->getWorkItem(&workItem))
			break;

		// Handler returning false stops this worker.
		if (!m_task->handler(*workItem))
			break;
	}

	m_thread = NULL;
	m_state = IDLE;
	return true;
}
// Wait until this worker becomes idle. The timeout is forwarded to
// WorkerThread::waitForState (negative means wait indefinitely).
// Returns true if the worker is idle on exit.
bool Worker::waitFor(int timeout)
{
	if (m_state == IDLE)
		return true;

	// No hosting thread to wait on (synchronous worker still running).
	if (m_thread == NULL)
		return false;

	m_thread->waitForState(WorkerThread::IDLE, timeout);
	return (m_state == IDLE);
}
/// class Coordinator
// Stop and destroy all pooled threads and workers. Thread shutdown is done
// in two passes: first post a non-blocking stop request to every active
// thread, then join and delete each one. m_mutex is temporarily released
// around every blocking call (join / wait) so other threads can still
// release their workers/threads while we drain the lists.
Coordinator::~Coordinator()
{
	MutexLockGuard guard(m_mutex, FB_FUNCTION);

	// Pass 1: ask all active threads to stop, without waiting.
	for (WorkerThread** p = m_activeThreads.begin(); p < m_activeThreads.end(); p++)
		(*p)->shutdown(false);

	// Pass 2: join and delete each active thread.
	while (!m_activeThreads.isEmpty())
	{
		WorkerThread* thd = m_activeThreads.pop();
		{
			MutexUnlockGuard unlock(m_mutex, FB_FUNCTION);
			thd->shutdown(true);
		}
		delete thd;
	}

	while (!m_idleThreads.isEmpty())
	{
		WorkerThread* thd = m_idleThreads.pop();
		{
			MutexUnlockGuard unlock(m_mutex, FB_FUNCTION);
			thd->shutdown(true);
		}
		delete thd;
	}

	// Wait for workers still busy on foreign threads to go idle; they are
	// moved off m_activeWorkers by releaseWorker() on their own threads.
	while (!m_activeWorkers.isEmpty())
	{
		Worker* w = m_activeWorkers.back();

		MutexUnlockGuard unlock(m_mutex, FB_FUNCTION);
		w->waitFor(-1);
	}

	while (!m_idleWorkers.isEmpty())
	{
		Worker* w = m_idleWorkers.pop();
		delete w;
	}
}
// Execute the task using up to task->getMaxWorkers() workers: one worker
// runs synchronously on the calling thread, the others on pooled worker
// threads. Returns after every engaged worker has finished.
void Coordinator::runSync(Task* task)
{
	int cntWorkers = setupWorkers(task->getMaxWorkers());
	if (cntWorkers < 1)
		return;

	HalfStaticArray<WorkerAndThd, 8> taskWorkers(*m_pool, cntWorkers);

	// The first worker is reserved for the calling thread itself.
	Worker* syncWorker = getWorker();
	taskWorkers.push(WorkerAndThd(syncWorker, NULL));

	// Attach the remaining workers to background threads. getThread() may
	// fail, in which case fewer workers than cntWorkers are engaged.
	for (int i = 1; i < cntWorkers; i++)
	{
		WorkerThread* thd = getThread();
		if (thd)
		{
			Worker* w = getWorker();
			taskWorkers.push(WorkerAndThd(w, thd));

			w->setTask(task);
			thd->runWorker(w);
		}
	}

	// run synchronously on the calling thread
	syncWorker->setTask(task);
	syncWorker->work(NULL);

	// Wait for all engaged workers. Iterate over what was actually pushed:
	// using cntWorkers here could index past the end of taskWorkers when
	// getThread() failed above.
	for (FB_SIZE_T i = 0; i < taskWorkers.getCount(); i++)
	{
		WorkerAndThd& wt = taskWorkers[i];
		if (wt.thread)
		{
			if (!wt.worker->isIdle())
				wt.thread->waitForState(WorkerThread::IDLE, -1);

			releaseThread(wt.thread);
		}

		releaseWorker(wt.worker);
	}
}
// Move one worker from the idle list to the active list and return it;
// NULL when no idle worker is available. Thread-safe.
Worker* Coordinator::getWorker()
{
	MutexLockGuard guard(m_mutex, FB_FUNCTION);

	if (m_idleWorkers.isEmpty())
		return NULL;

	Worker* const worker = m_idleWorkers.pop();
	m_activeWorkers.push(worker);
	return worker;
}
// Move a worker from the active list back to the idle list.
void Coordinator::releaseWorker(Worker* w)
{
	MutexLockGuard guard(m_mutex, FB_FUNCTION);

	FB_SIZE_T pos;
	if (m_activeWorkers.find(w, pos))
	{
		m_activeWorkers.remove(pos);
		m_idleWorkers.push(w);
	}

	// The worker must be in the idle list now - either just moved there,
	// or it was never taken out (debug-only check).
	fb_assert(m_idleWorkers.find(w, pos));
}
int Coordinator::setupWorkers(int count)
{
// TODO adjust count
for (int i = m_workers.getCount(); i < count; i++)
{
Worker* w = FB_NEW_POOL(*m_pool) Worker(this);
m_workers.add(w);
m_idleWorkers.push(w);
}
return count;
}
// Take an idle thread from the pool or start a new one. A newly started
// thread is waited on until it reports IDLE, so the returned thread is
// always ready to accept a worker. Returns NULL on failure.
void; // (see below)
WorkerThread* Coordinator::getThread()
{
	MutexLockGuard guard(m_mutex, FB_FUNCTION);

	WorkerThread* thd = NULL;
	if (!m_idleThreads.isEmpty())
		thd = m_idleThreads.pop();
	else
	{
		// No idle thread cached - spin up a fresh one and wait until its
		// threadRoutine() signals readiness.
		thd = WorkerThread::start(this);
		if (thd)
			thd->waitForState(WorkerThread::IDLE, -1);
	}

	if (thd)
	{
		fb_assert(thd->getState() == WorkerThread::IDLE);
		m_activeThreads.push(thd);
	}

	return thd;
}
// Return a thread to the idle pool. Threads already stopping or shut down
// are left in place - they are drained by ~Coordinator().
void Coordinator::releaseThread(WorkerThread* thd)
{
	MutexLockGuard guard(m_mutex, FB_FUNCTION);

	const WorkerThread::STATE thdState = thd->getState();
	if (thdState != WorkerThread::IDLE)
	{
		fb_assert(thdState == WorkerThread::STOPPING || thdState == WorkerThread::SHUTDOWN);
		return;
	}

	FB_SIZE_T pos;
	if (m_activeThreads.find(thd, pos))
	{
		m_activeThreads.remove(pos);
		m_idleThreads.push(thd);
	}
	else
	{
		// Should not happen: an idle thread was not accounted as active.
		// Recover by making sure it at least ends up on the idle list.
		fb_assert(false);

		if (!m_idleThreads.find(thd, pos))
			m_idleThreads.push(thd);
	}
}
} // namespace Firebird

209
src/common/Task.h Normal file
View File

@ -0,0 +1,209 @@
/*
* PROGRAM: Firebird Database Engine
* MODULE: Task.h
* DESCRIPTION: Parallel task execution support
*
* The contents of this file are subject to the Initial
* Developer's Public License Version 1.0 (the "License");
* you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
* http://www.ibphoenix.com/main.nfs?a=ibphoenix&page=ibp_idpl.
*
* Software distributed under the License is distributed AS IS,
* WITHOUT WARRANTY OF ANY KIND, either express or implied.
* See the License for the specific language governing rights
* and limitations under the License.
*
* The Original Code was created by Khorsun Vladyslav
* for the Firebird Open Source RDBMS project.
*
* Copyright (c) 2019 Khorsun Vladyslav <hvlad@users.sourceforge.net>
* and all contributors signed below.
*
* All Rights Reserved.
* Contributor(s): ______________________________________.
*
*/
#ifndef COMMON_TASK_H
#define COMMON_TASK_H
#include "firebird.h"
#include "../common/classes/alloc.h"
#include "../common/classes/array.h"
#include "../common/classes/locks.h"
#include "../common/classes/semaphore.h"
#include "../common/ThreadStart.h"
namespace Firebird
{
class Worker;
class Coordinator;
class WorkerThread;
// Task (probably a big one): holds the parameters, splits the whole job into
// smaller items (WorkItem), handles the items, and tracks the common running
// state, results and errors.
class Task
{
public:
	Task() {};
	virtual ~Task() {};

	// A single unit of work handed to a worker.
	class WorkItem
	{
	public:
		WorkItem(Task* task) :
			m_task(task)
		{}

		virtual ~WorkItem() {}

		Task* m_task;	// owning task, set at construction
	};

	// Process one work item; returning false stops the calling worker.
	virtual bool handler(WorkItem&) = 0;

	// Hand out the next item; false means there is no more work.
	virtual bool getWorkItem(WorkItem**) = 0;

	// Collect the overall task outcome into 'status'.
	virtual bool getResult(IStatus* status) = 0;

	// evaluate task complexity and recommend number of parallel workers
	virtual int getMaxWorkers() { return 1; }
};
// Worker: handles work items of a task, optionally on a separate thread.
class Worker
{
public:
	Worker(Coordinator* coordinator) :
		m_coordinator(coordinator),
		m_thread(NULL),
		m_task(NULL),
		m_state(IDLE)
	{
	}

	virtual ~Worker() {}

	// Bind the worker to a task; the next work() call will serve it.
	void setTask(Task* task)
	{
		m_task = task;
		m_state = READY;
	}

	// Process task items until done; thd may be NULL for synchronous use.
	bool work(WorkerThread* thd);

	bool isIdle() const { return m_state == IDLE; };

	// Wait until the worker goes idle; negative timeout waits indefinitely.
	bool waitFor(int timeout = -1);

protected:
	enum STATE {IDLE, READY, WORKING};

	Coordinator* const m_coordinator;	// set in constructor, not changed
	WorkerThread* m_thread;				// hosting thread while working, else NULL
	Task* m_task;						// current task, set by setTask()
	STATE m_state;
};
// Accepts Task(s) to handle, creates and assigns Workers to work on task(s),
// binds Workers to Threads, synchronizes task completion and gets results.
class Coordinator
{
public:
	Coordinator(MemoryPool* pool) :
		m_pool(pool),
		m_workers(*m_pool),
		m_idleWorkers(*m_pool),
		m_activeWorkers(*m_pool),
		m_idleThreads(*m_pool),
		m_activeThreads(*m_pool)
	{}

	~Coordinator();

	// Run the task to completion using the calling thread plus, when
	// recommended by the task, additional pooled threads.
	void runSync(Task*);

private:
	// Pairs a worker with the thread (if any) it runs on.
	struct WorkerAndThd
	{
		WorkerAndThd() :
			worker(NULL),
			thread(NULL)
		{}

		WorkerAndThd(Worker* w, WorkerThread* t) :
			worker(w),
			thread(t)
		{}

		Worker* worker;
		WorkerThread* thread;
	};

	// determine how many workers needed, allocate max possible number
	// of workers, make it all idle, return number of allocated workers
	int setupWorkers(int count);

	Worker* getWorker();
	void releaseWorker(Worker*);

	WorkerThread* getThread();
	void releaseThread(WorkerThread*);

	MemoryPool* m_pool;
	Mutex m_mutex;	// guards the worker and thread lists below

	HalfStaticArray<Worker*, 8> m_workers;		// all workers ever created
	HalfStaticArray<Worker*, 8> m_idleWorkers;
	HalfStaticArray<Worker*, 8> m_activeWorkers;

	// todo: move to thread pool
	HalfStaticArray<WorkerThread*, 8> m_idleThreads;
	HalfStaticArray<WorkerThread*, 8> m_activeThreads;
};
// Thread that hosts a Worker.
// State lifecycle: STARTING -> IDLE -> (RUNNING <-> IDLE)* -> STOPPING -> SHUTDOWN.
class WorkerThread
{
public:
	enum STATE {STARTING, IDLE, RUNNING, STOPPING, SHUTDOWN};

	~WorkerThread()
	{
		shutdown(true);

		// NOTE(review): m_thdHandle is never initialized in the constructor;
		// it looks like it stays indeterminate if Thread::start() throws
		// inside start() - confirm the check below is safe in that case.
#ifdef WIN_NT
		if (m_thdHandle != INVALID_HANDLE_VALUE)
			CloseHandle(m_thdHandle);
#endif
	}

	// Create the object and launch its OS thread (see Task.cpp).
	static WorkerThread* start(Coordinator*);

	// Hand a READY worker to this (idle) thread and wake it up.
	void runWorker(Worker*);

	// Wait for the given state; negative timeout waits indefinitely.
	bool waitForState(STATE state, int timeout);

	void shutdown(bool wait);

	STATE getState() const { return m_state; }

private:
	WorkerThread(Coordinator* coordinator) :
		m_coordinator(coordinator),
		m_worker(NULL),
		m_state(STARTING)
	{}

	static THREAD_ENTRY_DECLARE workerThreadRoutine(THREAD_ENTRY_PARAM);
	int threadRoutine();

	Coordinator* const m_coordinator;
	Worker* m_worker;
	Semaphore m_waitSem;	// idle thread waits on this semaphore to start work or go out
	Semaphore m_signalSem;	// semaphore is released when thread going idle
	STATE m_state;
	Thread::Handle m_thdHandle;
};
} // namespace Firebird
#endif // COMMON_TASK_H

View File

@ -346,6 +346,7 @@ ClumpletReader::ClumpletType ClumpletReader::getClumpletType(UCHAR tag) const
return StringSpb;
case isc_spb_bkp_factor:
case isc_spb_bkp_length:
case isc_spb_bkp_parallel_workers:
case isc_spb_res_length:
case isc_spb_res_buffers:
case isc_spb_res_page_size:
@ -369,6 +370,7 @@ ClumpletReader::ClumpletType ClumpletReader::getClumpletType(UCHAR tag) const
case isc_spb_rpr_commit_trans:
case isc_spb_rpr_rollback_trans:
case isc_spb_rpr_recover_two_phase:
case isc_spb_rpr_par_workers:
return IntSpb;
case isc_spb_rpr_commit_trans_64:
case isc_spb_rpr_rollback_trans_64:

View File

@ -27,7 +27,6 @@
#include "../../common/classes/TimerImpl.h"
#include "../../common/StatusHolder.h"
#include "../../common/ThreadStart.h"
#include "../../common/utils_proto.h"
namespace Firebird {
@ -36,7 +35,7 @@ void TimerImpl::handler()
{
{
MutexLockGuard guard(m_mutex, FB_FUNCTION);
fb_assert(!m_inHandler);
fb_assert(!m_handlerTid);
m_fireTime = 0;
if (!m_expTime) // Timer was reset to zero or stopped, do nothing
@ -54,7 +53,7 @@ void TimerImpl::handler()
m_expTime = 0;
if (m_onTimer)
m_inHandler = true;
m_handlerTid = Thread::getId();
}
if (!m_onTimer)
@ -63,7 +62,7 @@ void TimerImpl::handler()
m_onTimer(this);
MutexLockGuard guard(m_mutex, FB_FUNCTION);
m_inHandler = false;
m_handlerTid = 0;
}
void TimerImpl::reset(unsigned int timeout)
@ -108,8 +107,12 @@ void TimerImpl::stop()
{
MutexLockGuard guard(m_mutex, FB_FUNCTION);
// Allow handler() to call stop()
if (m_handlerTid == Thread::getId())
return;
// hvlad: it could be replaced by condition variable when we have good one for Windows
while (m_inHandler)
while (m_handlerTid)
{
MutexUnlockGuard unlock(m_mutex, FB_FUNCTION);
Thread::sleep(10);

View File

@ -32,6 +32,7 @@
#include "../../common/classes/ImplementHelper.h"
#include "../../common/classes/locks.h"
#include "../../common/ThreadStart.h"
namespace Firebird {
@ -51,7 +52,7 @@ public:
TimerImpl() :
m_fireTime(0),
m_expTime(0),
m_inHandler(false)
m_handlerTid(0)
{ }
// ITimer implementation
@ -82,9 +83,9 @@ public:
private:
Mutex m_mutex;
SINT64 m_fireTime; // when ITimer will fire, could be less than m_expTime
SINT64 m_expTime; // when actual idle timeout will expire
SINT64 m_expTime; // when actual timeout will expire
std::function<OnTimerFunc> m_onTimer;
bool m_inHandler;
ThreadId m_handlerTid; // ID of handler thread, if handler is running
};

View File

@ -412,6 +412,12 @@ void Config::checkValues()
checkIntForLoBound(KEY_INLINE_SORT_THRESHOLD, 0, true);
checkIntForLoBound(KEY_MAX_STATEMENT_CACHE_SIZE, 0, true);
checkIntForLoBound(KEY_MAX_PARALLEL_WORKERS, 1, true);
checkIntForHiBound(KEY_MAX_PARALLEL_WORKERS, 64, false); // todo: detect number of available cores
checkIntForLoBound(KEY_PARALLEL_WORKERS, 1, true);
checkIntForHiBound(KEY_MAX_PARALLEL_WORKERS, values[KEY_MAX_PARALLEL_WORKERS].intVal, false);
}

View File

@ -190,6 +190,8 @@ enum ConfigKey
KEY_INLINE_SORT_THRESHOLD,
KEY_TEMP_PAGESPACE_DIR,
KEY_MAX_STATEMENT_CACHE_SIZE,
KEY_PARALLEL_WORKERS,
KEY_MAX_PARALLEL_WORKERS,
MAX_CONFIG_KEY // keep it last
};
@ -306,7 +308,9 @@ constexpr ConfigEntry entries[MAX_CONFIG_KEY] =
{TYPE_BOOLEAN, "UseFileSystemCache", false, true},
{TYPE_INTEGER, "InlineSortThreshold", false, 1000}, // bytes
{TYPE_STRING, "TempTableDirectory", false, ""},
{TYPE_INTEGER, "MaxStatementCacheSize", false, 2 * 1048576} // bytes
{TYPE_INTEGER, "MaxStatementCacheSize", false, 2 * 1048576}, // bytes
{TYPE_INTEGER, "ParallelWorkers", true, 1},
{TYPE_INTEGER, "MaxParallelWorkers", true, 1}
};
@ -633,6 +637,10 @@ public:
CONFIG_GET_PER_DB_STR(getTempPageSpaceDirectory, KEY_TEMP_PAGESPACE_DIR);
CONFIG_GET_PER_DB_INT(getMaxStatementCacheSize, KEY_MAX_STATEMENT_CACHE_SIZE);
CONFIG_GET_GLOBAL_INT(getParallelWorkers, KEY_PARALLEL_WORKERS);
CONFIG_GET_GLOBAL_INT(getMaxParallelWorkers, KEY_MAX_PARALLEL_WORKERS);
};
// Implementation of interface to access master configuration file

View File

@ -1438,6 +1438,7 @@ public:
static const unsigned MARK_MERGE = 0x02; // node is part of MERGE statement
static const unsigned MARK_FOR_UPDATE = 0x04; // implicit cursor used in UPDATE\DELETE\MERGE statement
static const unsigned MARK_AVOID_COUNTERS = 0x08; // do not touch record counters
static const unsigned MARK_BULK_INSERT = 0x10; // StoreNode is used for bulk operation
struct ExeState
{

View File

@ -8033,6 +8033,9 @@ const StmtNode* StoreNode::store(thread_db* tdbb, Request* request, WhichTrigger
record_param* rpb = &request->req_rpb[stream];
jrd_rel* relation = rpb->rpb_relation;
if ((marks & MARK_BULK_INSERT) || request->req_batch_mode)
rpb->rpb_stream_flags |= RPB_s_bulk;
const auto localTableSource = nodeAs<LocalTableSourceNode>(target);
const auto localTable = localTableSource ?
request->getStatement()->localTables[localTableSource->tableNumber] :

View File

@ -129,6 +129,8 @@
#define isc_dpb_decfloat_round 94
#define isc_dpb_decfloat_traps 95
#define isc_dpb_clear_map 96
#define isc_dpb_parallel_workers 100
#define isc_dpb_worker_attach 101
/**************************************************/
@ -421,6 +423,7 @@
#define isc_spb_bkp_keyname 17
#define isc_spb_bkp_crypt 18
#define isc_spb_bkp_include_data 19
#define isc_spb_bkp_parallel_workers 21
#define isc_spb_bkp_ignore_checksums 0x01
#define isc_spb_bkp_ignore_limbo 0x02
#define isc_spb_bkp_metadata_only 0x04
@ -431,6 +434,7 @@
#define isc_spb_bkp_expand 0x80
#define isc_spb_bkp_no_triggers 0x8000
#define isc_spb_bkp_zip 0x010000
#define isc_spb_bkp_direct_io 0x020000
/********************************************
* Parameters for isc_action_svc_properties *
@ -521,6 +525,7 @@
#define isc_spb_rpr_commit_trans_64 49
#define isc_spb_rpr_rollback_trans_64 50
#define isc_spb_rpr_recover_two_phase_64 51
#define isc_spb_rpr_par_workers 52
#define isc_spb_rpr_validate_db 0x01
#define isc_spb_rpr_sweep_db 0x02
@ -548,6 +553,7 @@
#define isc_spb_res_keyname isc_spb_bkp_keyname
#define isc_spb_res_crypt isc_spb_bkp_crypt
#define isc_spb_res_stat isc_spb_bkp_stat
#define isc_spb_res_parallel_workers isc_spb_bkp_parallel_workers
#define isc_spb_res_metadata_only isc_spb_bkp_metadata_only
#define isc_spb_res_deactivate_idx 0x0100
#define isc_spb_res_no_shadow 0x0200
@ -556,6 +562,7 @@
#define isc_spb_res_replace 0x1000
#define isc_spb_res_create 0x2000
#define isc_spb_res_use_all_space 0x4000
#define isc_spb_res_direct_io isc_spb_bkp_direct_io
#define isc_spb_res_replica_mode 20
/*****************************************

View File

@ -401,3 +401,7 @@ FB_IMPL_MSG_NO_SYMBOL(GBAK, 402, "publication for table")
FB_IMPL_MSG_SYMBOL(GBAK, 403, gbak_opt_replica, " @1REPLICA <mode> \"none\", \"read_only\" or \"read_write\" replica mode")
FB_IMPL_MSG_SYMBOL(GBAK, 404, gbak_replica_req, "\"none\", \"read_only\" or \"read_write\" required")
FB_IMPL_MSG_NO_SYMBOL(GBAK, 405, "could not access batch parameters")
FB_IMPL_MSG_NO_SYMBOL(GBAK, 406, " @1PAR(ALLEL) parallel workers")
FB_IMPL_MSG_SYMBOL(GBAK, 407, gbak_missing_prl_wrks, "parallel workers parameter missing")
FB_IMPL_MSG_SYMBOL(GBAK, 408, gbak_inv_prl_wrks, "expected parallel workers, encountered \"@1\"")
FB_IMPL_MSG_NO_SYMBOL(GBAK, 409, " @1D(IRECT_IO) direct IO for backup file(s)")

View File

@ -133,3 +133,4 @@ FB_IMPL_MSG_SYMBOL(GFIX, 132, gfix_opt_role, " -role set SQL ro
FB_IMPL_MSG_SYMBOL(GFIX, 133, gfix_role_req, "SQL role name required")
FB_IMPL_MSG_SYMBOL(GFIX, 134, gfix_opt_repl, " -repl(ica) replica mode <none / read_only / read_write>")
FB_IMPL_MSG_SYMBOL(GFIX, 135, gfix_repl_mode_req, "replica mode (none / read_only / read_write) required")
FB_IMPL_MSG_SYMBOL(GFIX, 136, gfix_opt_parallel, " -par(allel) parallel workers <n> (-sweep)")

View File

@ -3856,6 +3856,8 @@ const
isc_dpb_decfloat_round = byte(94);
isc_dpb_decfloat_traps = byte(95);
isc_dpb_clear_map = byte(96);
isc_dpb_parallel_workers = byte(100);
isc_dpb_worker_attach = byte(101);
isc_dpb_address = byte(1);
isc_dpb_addr_protocol = byte(1);
isc_dpb_addr_endpoint = byte(2);
@ -4013,6 +4015,7 @@ const
isc_spb_bkp_keyname = byte(17);
isc_spb_bkp_crypt = byte(18);
isc_spb_bkp_include_data = byte(19);
isc_spb_bkp_parallel_workers = byte(21);
isc_spb_bkp_ignore_checksums = $01;
isc_spb_bkp_ignore_limbo = $02;
isc_spb_bkp_metadata_only = $04;
@ -4023,6 +4026,7 @@ const
isc_spb_bkp_expand = $80;
isc_spb_bkp_no_triggers = $8000;
isc_spb_bkp_zip = $010000;
isc_spb_bkp_direct_io = $020000;
isc_spb_prp_page_buffers = byte(5);
isc_spb_prp_sweep_interval = byte(6);
isc_spb_prp_shutdown_db = byte(7);
@ -4078,6 +4082,7 @@ const
isc_spb_rpr_commit_trans_64 = byte(49);
isc_spb_rpr_rollback_trans_64 = byte(50);
isc_spb_rpr_recover_two_phase_64 = byte(51);
isc_spb_rpr_par_workers = byte(52);
isc_spb_rpr_validate_db = $01;
isc_spb_rpr_sweep_db = $02;
isc_spb_rpr_mend_db = $04;

View File

@ -254,6 +254,7 @@ Jrd::Attachment::Attachment(MemoryPool* pool, Database* dbb, JProvider* provider
att_dest_bind(&att_bindings),
att_original_timezone(TimeZoneUtil::getSystemTimeZone()),
att_current_timezone(att_original_timezone),
att_parallel_workers(0),
att_repl_appliers(*pool),
att_utility(UTIL_NONE),
att_procedures(*pool),
@ -1035,7 +1036,7 @@ void StableAttachmentPart::manualAsyncUnlock(ULONG& flags)
}
}
void StableAttachmentPart::onIdleTimer(TimerImpl*)
void StableAttachmentPart::doOnIdleTimer(TimerImpl*)
{
// Ensure attachment is still alive and still idle
@ -1074,8 +1075,11 @@ void Attachment::setupIdleTimer(bool clear)
{
if (!att_idle_timer)
{
att_idle_timer = FB_NEW IdleTimer(getStable());
att_idle_timer->setOnTimer(&StableAttachmentPart::onIdleTimer);
using IdleTimer = TimerWithRef<StableAttachmentPart>;
auto idleTimer = FB_NEW IdleTimer(getStable());
idleTimer->setOnTimer(&StableAttachmentPart::onIdleTimer);
att_idle_timer = idleTimer;
}
att_idle_timer->reset(timeout);

View File

@ -166,6 +166,7 @@ const ULONG ATT_monitor_init = 0x100000L; // Attachment is registered in monito
const ULONG ATT_repl_reset = 0x200000L; // Replication set has been reset
const ULONG ATT_replicating = 0x400000L; // Replication is active
const ULONG ATT_resetting = 0x800000L; // Session reset is in progress
const ULONG ATT_worker = 0x1000000L; // Worker attachment, managed by the engine
const ULONG ATT_NO_CLEANUP = (ATT_no_cleanup | ATT_notify_gc);
@ -373,7 +374,13 @@ public:
return shutError;
}
void onIdleTimer(Firebird::TimerImpl*);
void onIdleTimer(Firebird::TimerImpl* timer)
{
doOnIdleTimer(timer);
}
protected:
virtual void doOnIdleTimer(Firebird::TimerImpl* timer);
private:
Attachment* att;
@ -602,6 +609,7 @@ public:
CoercionArray* att_dest_bind;
USHORT att_original_timezone;
USHORT att_current_timezone;
int att_parallel_workers;
Firebird::RefPtr<Firebird::IReplicatedSession> att_replicator;
Firebird::AutoPtr<Replication::TableMatcher> att_repl_matcher;
@ -807,9 +815,7 @@ private:
unsigned int att_idle_timeout; // seconds
unsigned int att_stmt_timeout; // milliseconds
typedef Firebird::TimerWithRef<StableAttachmentPart> IdleTimer;
Firebird::RefPtr<IdleTimer> att_idle_timer;
Firebird::RefPtr<Firebird::TimerImpl> att_idle_timer;
Firebird::Array<JBatch*> att_batches;
InitialOptions att_initial_options; // Initial session options
@ -961,7 +967,7 @@ public:
}
}
private:
protected:
void destroy(Attachment* attachment);
// "public" interface for internal (system) attachment

View File

@ -84,6 +84,7 @@ public:
ULONG rel_pri_data_space; // lowest pointer page with primary data page space
ULONG rel_sec_data_space; // lowest pointer page with secondary data page space
ULONG rel_last_free_pri_dp; // last primary data page found with space
ULONG rel_last_free_blb_dp; // last blob data page found with space
USHORT rel_pg_space_id;
RelationPages(Firebird::MemoryPool& pool)

View File

@ -0,0 +1,479 @@
/*
* PROGRAM: Firebird Database Engine
* MODULE: WorkerAttachment.cpp
* DESCRIPTION: Parallel task execution support
*
* The contents of this file are subject to the Initial
* Developer's Public License Version 1.0 (the "License");
* you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
* http://www.ibphoenix.com/main.nfs?a=ibphoenix&page=ibp_idpl.
*
* Software distributed under the License is distributed AS IS,
* WITHOUT WARRANTY OF ANY KIND, either express or implied.
* See the License for the specific language governing rights
* and limitations under the License.
*
* The Original Code was created by Khorsun Vladyslav
* for the Firebird Open Source RDBMS project.
*
* Copyright (c) 2019 Khorsun Vladyslav <hvlad@users.sourceforge.net>
* and all contributors signed below.
*
* All Rights Reserved.
* Contributor(s): ______________________________________.
*
*/
#include "../jrd/WorkerAttachment.h"
#include "../common/config/config.h"
#include "../common/isc_proto.h"
#include "../common/utils_proto.h"
#include "../common/StatusArg.h"
#include "../common/classes/ClumpletWriter.h"
#include "../jrd/jrd.h"
#include "../jrd/ini_proto.h"
#include "../jrd/lck_proto.h"
#include "../jrd/pag_proto.h"
#include "../jrd/tra_proto.h"
#include "../jrd/status.h"
using namespace Firebird;
namespace Jrd {
const unsigned WORKER_IDLE_TIMEOUT = 60; // 1 minute
/// class WorkerStableAttachment
// Build a system ("worker") attachment over an already created Attachment:
// assign the internal <Worker> user, then perform engine-side initialization
// (lock owner, metadata, header page, attachment id, transactions) and
// publish the attachment in the monitoring tables.
WorkerStableAttachment::WorkerStableAttachment(FbStatusVector* status, Jrd::Attachment* attachment) :
	SysStableAttachment(attachment)
{
	UserId user;
	user.setUserName("<Worker>");
	// user.usr_flags = USR_owner;		// need owner privs ??

	attachment->att_user = FB_NEW_POOL(*attachment->att_pool) UserId(*attachment->att_pool, user);
	attachment->setStable(this);

	// Establish a database/attachment context for the init calls below.
	BackgroundContextHolder tdbb(attachment->att_database, attachment, status, FB_FUNCTION);

	LCK_init(tdbb, LCK_OWNER_attachment);
	INI_init(tdbb);
	INI_init2(tdbb);
	PAG_header(tdbb, true);
	PAG_attachment_id(tdbb);
	TRA_init(attachment);

	Monitoring::publishAttachment(tdbb);

	initDone();
}
// Tear down the underlying attachment (see fini()).
WorkerStableAttachment::~WorkerStableAttachment()
{
	fini();
}
// Factory: create a worker attachment on the given database.
// Returns NULL on failure with the error in 'status'; a partially created
// Attachment is destroyed before returning.
WorkerStableAttachment* WorkerStableAttachment::create(FbStatusVector* status, Jrd::Database* dbb)
{
	Attachment* attachment = NULL;
	try
	{
		attachment = Attachment::create(dbb, NULL);
		attachment->att_filename = dbb->dbb_filename;
		attachment->att_flags |= ATT_worker;	// mark as engine-managed worker

		WorkerStableAttachment* sAtt = FB_NEW WorkerStableAttachment(status, attachment);
		return sAtt;
	}
	catch (const Exception& ex)
	{
		ex.stuffException(status);
	}

	// Construction failed - do not leak the raw attachment.
	if (attachment)
		Attachment::destroy(attachment);

	return NULL;
}
void WorkerStableAttachment::doOnIdleTimer(Firebird::TimerImpl* timer)
{
WorkerAttachment::detachIdle(this);
}
// Tear the worker attachment down: unregister from monitoring, release all
// locks and relation references while holding the attachment sync, then
// destroy the attachment itself outside the sync.
void WorkerStableAttachment::fini()
{
	Attachment* attachment = NULL;
	{
		AttSyncLockGuard guard(*getSync(), FB_FUNCTION);

		attachment = getHandle();
		if (!attachment)
			return;		// already gone - nothing to do

		Database* dbb = attachment->att_database;

		FbLocalStatus status_vector;
		BackgroundContextHolder tdbb(dbb, attachment, &status_vector, FB_FUNCTION);

		Monitoring::cleanupAttachment(tdbb);
		attachment->releaseLocks(tdbb);
		LCK_fini(tdbb, LCK_OWNER_attachment);

		attachment->releaseRelations(tdbb);
	}

	destroy(attachment);
}
/// class WorkerAttachment
GlobalPtr<Mutex> WorkerAttachment::m_mapMutex;
GlobalPtr<WorkerAttachment::MapDbIdToWorkAtts> WorkerAttachment::m_map;
bool WorkerAttachment::m_shutdown = false;
// Per-database registry entry: pools of idle and active worker attachments
// plus a counter of regular user attachments (maintained outside SuperServer
// mode, see incUserAtts/decUserAtts).
WorkerAttachment::WorkerAttachment() :
	m_idleAtts(*getDefaultMemoryPool()),
	m_activeAtts(*getDefaultMemoryPool()),
	m_cntUserAtts(0)
{
}
void WorkerAttachment::incUserAtts(const PathName& dbname)
{
if (Config::getServerMode() == MODE_SUPER)
return;
WorkerAttachment* item = getByName(dbname);
if (item)
{
MutexLockGuard guard(item->m_mutex, FB_FUNCTION);
item->m_cntUserAtts++;
}
}
// Un-register a user attachment of the given database. When the last user
// detaches and no worker is busy, the cached idle worker attachments are
// released as well. Not used in SuperServer mode.
void WorkerAttachment::decUserAtts(const PathName& dbname)
{
	if (Config::getServerMode() == MODE_SUPER)
		return;

	WorkerAttachment* item = getByName(dbname);
	if (item)
	{
		bool tryClear = false;
		{
			MutexLockGuard guard(item->m_mutex, FB_FUNCTION);

			item->m_cntUserAtts--;
			tryClear = (item->m_cntUserAtts == 0 && item->m_activeAtts.isEmpty());
		}

		// clear(true) re-checks the counters under the item mutex.
		if (tryClear)
			item->clear(true);
	}
}
// Find or create the per-database registry entry for 'dbname'.
// Returns NULL once the registry is shut down; the flag is double-checked
// under the map mutex. Created entries live in m_map until shutdown().
WorkerAttachment* WorkerAttachment::getByName(const PathName& dbname)
{
	if (m_shutdown)
		return NULL;

	WorkerAttachment* ret = NULL;

	MutexLockGuard guard(m_mapMutex, FB_FUNCTION);

	if (m_shutdown)
		return NULL;

	if (!m_map->get(dbname, ret))
	{
		// Use FB_NEW for consistency with the rest of this module
		// (pool-aware allocation) instead of a bare operator new.
		ret = FB_NEW WorkerAttachment();
		m_map->put(dbname, ret);
	}

	return ret;
}
// Global shutdown: detach every cached worker attachment of every database,
// delete the registry entries and mark the registry closed. Safe to call
// more than once; m_shutdown is double-checked under the map mutex.
void WorkerAttachment::shutdown()
{
	if (m_shutdown)
		return;

	MutexLockGuard guard(m_mapMutex, FB_FUNCTION);

	if (m_shutdown)
		return;

	m_shutdown = true;

	MapDbIdToWorkAtts::Accessor acc(&m_map);
	if (!acc.getFirst())
		return;

	do
	{
		WorkerAttachment* item = acc.current()->second;
		item->clear(false);		// unconditional - refs are not checked
		delete item;
	}
	while (acc.getNext());

	m_map->clear();
}
// Drop the cached worker attachments of a database that is being shut down.
// Only meaningful in SuperServer mode, where workers are kept per-process.
void WorkerAttachment::shutdownDbb(Database* dbb)
{
	if (Config::getServerMode() != MODE_SUPER)
		return;

	MutexLockGuard guard(m_mapMutex, FB_FUNCTION);

	WorkerAttachment* entry = NULL;
	if (m_map->get(dbb->dbb_filename, entry))
		entry->clear(false);
}
// Acquire a worker attachment for the given database: reuse an idle one or
// create a new one, limited by the MaxParallelWorkers setting.
// Returns NULL on failure with the reason in 'status'.
StableAttachmentPart* WorkerAttachment::getAttachment(FbStatusVector* status, Database* dbb)
{
	// There should be no locked attachment.
#ifdef _DEBUG
	thread_db* tdbb = JRD_get_thread_data();
	if (tdbb)
	{
		Attachment* att = tdbb->getAttachment();
		if (att)
		{
			const StableAttachmentPart::Sync* sync = att->getStable()->getSync();
			fb_assert(!sync || !sync->locked());
		}
	}
#endif

	// Preset the most common failure reason.
	Arg::Gds(isc_shutdown).copyTo(status);

	WorkerAttachment* item = getByName(dbb->dbb_filename);
	if (!item)
		return NULL;

	MutexLockGuard guard(item->m_mutex, FB_FUNCTION);

	if (m_shutdown)
		return NULL;

	// Read the setting into a signed value first: the original unsigned
	// comparison (FB_SIZE_T maxWorkers <= 0) could never see a negative
	// config value - it would wrap to a huge positive number instead of
	// being treated as "unlimited".
	const int cfgMaxWorkers = Config::getMaxParallelWorkers();
	const FB_SIZE_T maxWorkers = (cfgMaxWorkers <= 0) ? MAX_ULONG : (FB_SIZE_T) cfgMaxWorkers;

	StableAttachmentPart* sAtt = NULL;
	while (!item->m_idleAtts.isEmpty())
	{
		if (m_shutdown)
			return NULL;

		sAtt = item->m_idleAtts.pop();
		if (sAtt->getHandle())
			break;

		// idle worker attachment was unexpectedly deleted, clean up and try next one
		MutexUnlockGuard unlock(item->m_mutex, FB_FUNCTION);

		FbLocalStatus local;
		doDetach(&local, sAtt);
		sAtt = NULL;
	}

	if (!sAtt)
	{
		if (item->m_activeAtts.getCount() >= maxWorkers)
		{
			(Arg::Gds(isc_random) << Arg::Str("Not enough free worker attachments")).copyTo(status);
			return NULL;
		}

		MutexUnlockGuard unlock(item->m_mutex, FB_FUNCTION);

		status->init();
		sAtt = doAttach(status, dbb);
		if (!sAtt)
		{
			// log error ?
			if (!m_shutdown)
				iscLogStatus("Failed to create worker attachment\n", status);
			return NULL;
		}
	}

	Attachment* att = NULL;
	{
		MutexUnlockGuard unlock(item->m_mutex, FB_FUNCTION);

		AttSyncLockGuard guard(*sAtt->getSync(), FB_FUNCTION);

		att = sAtt->getHandle();
		fb_assert(!att || (att->att_flags & ATT_worker));

		if (att)
		{
			att->att_use_count++;
			att->setupIdleTimer(true);	// suspend idle timeout while in use
		}
	}

	if (att)
		item->m_activeAtts.add(sAtt);

	return sAtt;
}
// Return a worker attachment to the pool after its task finished.
// Decrements the use count, moves the attachment from the active to the idle
// list and, depending on pool/server state, may detach it or purge the whole
// per-database pool entry.
void WorkerAttachment::releaseAttachment(FbStatusVector* status, StableAttachmentPart* sAtt)
{
status->init();
WorkerAttachment* item = NULL;
{
// Attachment-level sync: the handle may disappear concurrently.
AttSyncLockGuard attGuard(*sAtt->getSync(), FB_FUNCTION);
Attachment* att = sAtt->getHandle();
if (!att)
return;		// already gone, nothing to release
att->att_use_count--;
att->setupIdleTimer(false);	// presumably re-arms the idle timer - confirm setupIdleTimer(bool) semantics
item = getByName(att->att_database->dbb_filename);
}
// No pool entry or global shutdown: the attachment cannot stay pooled.
const bool detach = (m_shutdown || (item == NULL));
bool tryClear = false;
if (item)
{
MutexLockGuard guard(item->m_mutex, FB_FUNCTION);
FB_SIZE_T pos;
if (item->m_activeAtts.find(sAtt, pos))
item->m_activeAtts.remove(pos);
if (!m_shutdown)
{
item->m_idleAtts.push(sAtt);
// Candidate for purge when no user attachments and no busy workers remain.
tryClear = (item->m_cntUserAtts == 0 && item->m_activeAtts.isEmpty());
}
}
if (detach)
doDetach(status, sAtt);
// Outside SuperServer mode idle workers are not kept once the database is unused.
if (tryClear && (Config::getServerMode() != MODE_SUPER))
item->clear(true);	// true: re-check the counters under the pool mutex
}
void WorkerAttachment::clear(bool checkRefs)
{
HalfStaticArray<Jrd::StableAttachmentPart*, 8> toDetach(*getDefaultMemoryPool());
{
MutexLockGuard guard(m_mutex, FB_FUNCTION);
if (checkRefs && (m_cntUserAtts != 0 || !m_activeAtts.isEmpty()))
return;
toDetach.assign(m_idleAtts);
m_idleAtts.clear();
m_activeAtts.clear(); // should be released by regular JRD shutdown
}
FbLocalStatus status;
while (!toDetach.isEmpty())
{
StableAttachmentPart* sAtt = toDetach.pop();
doDetach(&status, sAtt);
}
}
// Detach the given worker attachment if (and only if) it sits idle in the pool.
// Returns true when the attachment was found idle and has been detached.
// Fix: pass the dereferenced sync object to AttSyncLockGuard, consistent with
// every other use (releaseAttachment, getAttachment) which takes a reference.
bool WorkerAttachment::detachIdle(StableAttachmentPart* sAtt)
{
	WorkerAttachment* item = NULL;
	{ // scope
		AttSyncLockGuard attGuard(*sAtt->getSync(), FB_FUNCTION);

		Attachment* att = sAtt->getHandle();
		if (!att || att->att_use_count > 0)
			return false;	// gone or currently busy - not idle

		item = getByName(att->att_database->dbb_filename);
	}

	if (item)
	{
		MutexLockGuard guard(item->m_mutex, FB_FUNCTION);

		FB_SIZE_T pos;
		if (item->m_idleAtts.find(sAtt, pos))
			item->m_idleAtts.remove(pos);
		else
			return false;	// not in the idle list - somebody else took it
	}

	FbLocalStatus status;
	doDetach(&status, sAtt);
	return true;
}
// Create a fresh worker attachment for the given database.
// SuperServer: in-engine system attachment. Other modes: regular attach via
// the provider with the worker-attach DPB flag.
// On success the pool takes a reference (balanced by release in doDetach).
StableAttachmentPart* WorkerAttachment::doAttach(FbStatusVector* status, Database* dbb)
{
	StableAttachmentPart* newAtt = NULL;

	if (Config::getServerMode() == MODE_SUPER)
		newAtt = WorkerStableAttachment::create(status, dbb);
	else
	{
		ClumpletWriter dpb(ClumpletReader::Tagged, MAX_DPB_SIZE, isc_dpb_version1);
		dpb.insertString(isc_dpb_trusted_auth, DBA_USER_NAME);
		dpb.insertInt(isc_dpb_worker_attach, 1);

		AutoPlugin<JProvider> provider(JProvider::getInstance());
		//provider->setDbCryptCallback(&status, tdbb->getAttachment()->att_crypt_callback);

		JAttachment* jAtt = provider->attachDatabase(status, dbb->dbb_filename.c_str(),
			dpb.getBufferLength(), dpb.getBuffer());

		if (!(status->getState() & IStatus::STATE_ERRORS))
			newAtt = jAtt->getStable();
	}

	if (newAtt)
	{
		newAtt->addRef(); // !!
		newAtt->getHandle()->setIdleTimeout(WORKER_IDLE_TIMEOUT);
	}

	return newAtt;
}
// Detach a worker attachment and drop the pool's reference to it.
// Fix: use static_cast for the class-hierarchy downcast. reinterpret_cast
// performs no pointer adjustment and silently breaks if the target class
// ever stops having the base as its first subobject; static_cast is the
// correct named cast for a known-safe downcast (doAttach creates only
// WorkerStableAttachment instances in SuperServer mode).
void WorkerAttachment::doDetach(FbStatusVector* status, StableAttachmentPart* sAtt)
{
	status->init();

	// if (att->att_flags & ATT_system)
	if (Config::getServerMode() == MODE_SUPER)
	{
		WorkerStableAttachment* wrk = static_cast<WorkerStableAttachment*>(sAtt);
		wrk->fini();
	}
	else
	{
		JAttachment* jAtt = sAtt->getInterface();
		jAtt->detach(status);
	}

	sAtt->release(); // !! balances addRef() done in doAttach()
}
} // namespace Jrd

117
src/jrd/WorkerAttachment.h Normal file
View File

@ -0,0 +1,117 @@
/*
* PROGRAM: Firebird Database Engine
* MODULE: WorkerAttachment.h
* DESCRIPTION: Parallel task execution support
*
* The contents of this file are subject to the Initial
* Developer's Public License Version 1.0 (the "License");
* you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
* http://www.ibphoenix.com/main.nfs?a=ibphoenix&page=ibp_idpl.
*
* Software distributed under the License is distributed AS IS,
* WITHOUT WARRANTY OF ANY KIND, either express or implied.
* See the License for the specific language governing rights
* and limitations under the License.
*
* The Original Code was created by Khorsun Vladyslav
* for the Firebird Open Source RDBMS project.
*
* Copyright (c) 2019 Khorsun Vladyslav <hvlad@users.sourceforge.net>
* and all contributors signed below.
*
* All Rights Reserved.
* Contributor(s): ______________________________________.
*
*/
#ifndef JRD_WORKER_ATTACHMENT_H
#define JRD_WORKER_ATTACHMENT_H
#include "firebird.h"
#include "../common/classes/alloc.h"
#include "../common/classes/array.h"
#include "../common/classes/fb_string.h"
#include "../common/classes/GenericMap.h"
#include "../common/classes/init.h"
#include "../common/classes/locks.h"
#include "../jrd/Attachment.h"
#include "../jrd/jrd.h"
#include "../jrd/status.h"
namespace Jrd
{
// Worker attachment flavour used in SuperServer mode: a system ("stable")
// attachment created inside the engine (see WorkerAttachment::doAttach).
class WorkerStableAttachment : public SysStableAttachment
{
public:
// Factory; presumably returns NULL and fills 'status' on failure - confirm in jrd.cpp.
static WorkerStableAttachment* create(FbStatusVector* status, Jrd::Database* dbb);
// Tear the attachment down; called from WorkerAttachment::doDetach().
void fini();
protected:
// Idle-timer callback hook (workers get an idle timeout, see WORKER_IDLE_TIMEOUT).
virtual void doOnIdleTimer(Firebird::TimerImpl* timer);
private:
// Construction/destruction only via create()/fini().
explicit WorkerStableAttachment(FbStatusVector* status, Jrd::Attachment* att);
virtual ~WorkerStableAttachment();
};
// Scoped helper establishing a worker's execution context: sets up the
// database context and takes the attachment sync for the current thread.
// Improvement: copy prevention now uses C++11 '= delete' (better diagnostics
// than the old private-undeclared-member idiom; the codebase is C++11+).
class WorkerContextHolder : public Jrd::DatabaseContextHolder, public Jrd::Attachment::SyncGuard
{
public:
	WorkerContextHolder(thread_db* tdbb, const char* f) :
		DatabaseContextHolder(tdbb),
		Jrd::Attachment::SyncGuard(tdbb->getAttachment(), f)
	{
	}

	// copying is prohibited
	WorkerContextHolder(const WorkerContextHolder&) = delete;
	WorkerContextHolder& operator=(const WorkerContextHolder&) = delete;
};
// Per-database pool of worker attachments used for parallel task execution.
// All entry points are static; pool entries live in the global m_map keyed
// by database file name.
class WorkerAttachment
{
public:
explicit WorkerAttachment();
// Get an idle (or newly created) worker for 'dbb'; NULL on failure or shutdown.
static Jrd::StableAttachmentPart* getAttachment(FbStatusVector* status, Jrd::Database* dbb);
// Return a worker to the pool; may detach it depending on pool/server state.
static void releaseAttachment(FbStatusVector* status, Jrd::StableAttachmentPart* sAtt);
// Detach the worker if it sits idle in the pool; false if busy or not pooled.
static bool detachIdle(Jrd::StableAttachmentPart* sAtt);
// Track the number of ordinary user attachments per database.
static void incUserAtts(const Firebird::PathName& dbname);
static void decUserAtts(const Firebird::PathName& dbname);
// Global pool shutdown / purge of a single database's workers.
static void shutdown();
static void shutdownDbb(Jrd::Database* dbb);
private:
static WorkerAttachment* getByName(const Firebird::PathName& dbname);
static Jrd::StableAttachmentPart* doAttach(FbStatusVector* status, Jrd::Database* dbb);
static void doDetach(FbStatusVector* status, Jrd::StableAttachmentPart* sAtt);
// Detach idle workers; with checkRefs, only when no users/active workers remain.
void clear(bool checkRefs);
typedef Firebird::GenericMap<Firebird::Pair<Firebird::Left<Firebird::PathName, WorkerAttachment*> > >
MapDbIdToWorkAtts;
static Firebird::GlobalPtr<Firebird::Mutex> m_mapMutex;
static Firebird::GlobalPtr<MapDbIdToWorkAtts> m_map;
// NOTE(review): read without m_mapMutex in several places; plain bool is
// formally racy - consider an atomic type. TODO confirm.
static bool m_shutdown;
Firebird::Mutex m_mutex;	// guards the two lists below
Firebird::HalfStaticArray<Jrd::StableAttachmentPart*, 8> m_idleAtts;
Firebird::SortedArray<Jrd::StableAttachmentPart*,
Firebird::InlineStorage<Jrd::StableAttachmentPart*, 8> > m_activeAtts;
int m_cntUserAtts;	// number of regular (non-worker) user attachments
};
} // namespace Jrd
#endif // JRD_WORKER_ATTACHMENT_H

View File

@ -950,7 +950,7 @@ SLONG blb::BLB_lseek(USHORT mode, SLONG offset)
// compiler allows to modify from_desc->dsc_address' contents when from_desc is
// constant, this is misleading so I didn't make the source descriptor constant.
void blb::move(thread_db* tdbb, dsc* from_desc, dsc* to_desc,
jrd_rel* relation, Record* record, USHORT fieldId)
jrd_rel* relation, Record* record, USHORT fieldId, bool bulk)
{
/**************************************
*
@ -1206,6 +1206,9 @@ void blb::move(thread_db* tdbb, dsc* from_desc, dsc* to_desc,
#ifdef CHECK_BLOB_FIELD_ACCESS_FOR_SELECT
blob->blb_fld_id = fieldId;
#endif
if (bulk)
blob->blb_flags |= BLB_bulk;
destination->set_permanent(relation->rel_id, DPM_store_blob(tdbb, blob, record));
// This is the only place in the engine where blobs are materialized
// If new places appear code below should transform to common sub-routine

View File

@ -109,7 +109,7 @@ public:
static SLONG get_slice(Jrd::thread_db*, Jrd::jrd_tra*, const Jrd::bid*, const UCHAR*, USHORT,
const UCHAR*, SLONG, UCHAR*);
SLONG BLB_lseek(USHORT, SLONG);
static void move(thread_db* tdbb, dsc* from_desc, dsc* to_desc, jrd_rel* relation = nullptr, Record* record = nullptr, USHORT fieldId = 0);
static void move(thread_db* tdbb, dsc* from_desc, dsc* to_desc, jrd_rel* relation = nullptr, Record* record = nullptr, USHORT fieldId = 0, bool bulk = false);
static blb* open(thread_db*, jrd_tra*, const bid*);
static blb* open2(thread_db*, jrd_tra*, const bid*, USHORT, const UCHAR*, bool = false);
void BLB_put_data(thread_db*, const UCHAR*, SLONG);
@ -179,6 +179,7 @@ const int BLB_closed = 8; // Temporary blob has been closed
const int BLB_damaged = 16; // Blob is busted
const int BLB_seek = 32; // Seek is pending
const int BLB_large_scan = 64; // Blob is larger than page buffer cache
const int BLB_bulk = 128; // Blob created by bulk insert operation
/* Blob levels are:

View File

@ -3440,7 +3440,6 @@ static ULONG fast_load(thread_db* tdbb,
jrd_rel* const relation = creation.relation;
index_desc* const idx = creation.index;
const USHORT key_length = creation.key_length;
Sort* const scb = creation.sort;
const USHORT pageSpaceID = relation->getPages(tdbb)->rel_pg_space_id;
@ -3560,9 +3559,9 @@ static ULONG fast_load(thread_db* tdbb,
// Get the next record in sorted order.
UCHAR* record;
scb->get(tdbb, reinterpret_cast<ULONG**>(&record));
creation.sort->get(tdbb, reinterpret_cast<ULONG**>(&record));
if (!record || creation.duplicates)
if (!record || creation.duplicates.value())
break;
index_sort_record* isr = (index_sort_record*) (record + key_length);
@ -3780,7 +3779,7 @@ static ULONG fast_load(thread_db* tdbb,
++duplicates;
if (unique && primarySeen && isPrimary && !(isr->isr_flags & ISR_null))
{
creation.duplicates++;
++creation.duplicates;
creation.dup_recno = isr->isr_record_number;
}
@ -4152,10 +4151,6 @@ static ULONG fast_load(thread_db* tdbb,
tdbb->tdbb_flags &= ~TDBB_no_cache_unwind;
// do some final housekeeping
creation.sort.reset();
// If index flush fails, try to delete the index tree.
// If the index delete fails, just go ahead and punt.
try

View File

@ -48,6 +48,8 @@ struct temporary_key;
class jrd_tra;
class BtrPageGCLock;
class Sort;
class PartitionedSort;
struct sort_key_def;
// Index descriptor block -- used to hold info from index root page
@ -278,11 +280,14 @@ struct IndexCreation
{
jrd_rel* relation;
index_desc* index;
const TEXT* index_name;
jrd_tra* transaction;
PartitionedSort* sort;
sort_key_def* key_desc;
USHORT key_length;
Firebird::AutoPtr<Sort> sort;
USHORT nullIndLen;
SINT64 dup_recno;
SLONG duplicates;
Firebird::AtomicCounter duplicates;
};
// Class used to report any index related errors

View File

@ -1141,7 +1141,7 @@ void CCH_flush(thread_db* tdbb, USHORT flush_flag, TraNumber tra_number)
const Jrd::Attachment* att = tdbb->getAttachment();
const bool dontFlush = (dbb->dbb_flags & DBB_creating) ||
((dbb->dbb_ast_flags & DBB_shutdown_single) &&
((dbb->dbb_ast_flags & DBB_shutdown) &&
att && (att->att_flags & (ATT_creator | ATT_system)));
if (!(main_file->fil_flags & FIL_force_write) && (max_num || max_time) && !dontFlush)

View File

@ -961,6 +961,9 @@ void DPM_delete( thread_db* tdbb, record_param* rpb, ULONG prior_page)
if (relPages->rel_last_free_pri_dp == pages[i])
relPages->rel_last_free_pri_dp = 0;
if (relPages->rel_last_free_blb_dp == pages[i])
relPages->rel_last_free_blb_dp = 0;
relPages->setDPNumber(dpSequence + s, 0);
}
@ -1651,7 +1654,7 @@ punt:
}
bool DPM_next(thread_db* tdbb, record_param* rpb, USHORT lock_type, bool onepage)
bool DPM_next(thread_db* tdbb, record_param* rpb, USHORT lock_type, FindNextRecordScope scope)
{
/**************************************
*
@ -1753,16 +1756,7 @@ bool DPM_next(thread_db* tdbb, record_param* rpb, USHORT lock_type, bool onepage
}
}
if (window->win_flags & WIN_large_scan)
CCH_RELEASE_TAIL(tdbb, window);
else if ((window->win_flags & WIN_garbage_collector) &&
(window->win_flags & WIN_garbage_collect))
{
CCH_RELEASE_TAIL(tdbb, window);
window->win_flags &= ~WIN_garbage_collect;
}
else
CCH_RELEASE(tdbb, window);
CCH_RELEASE(tdbb, window);
}
// Find the next pointer page, data page, and record
@ -1850,9 +1844,11 @@ bool DPM_next(thread_db* tdbb, record_param* rpb, USHORT lock_type, bool onepage
rpb->rpb_number.decrement();
check_swept(tdbb, rpb);
rpb->rpb_number = saveRecNo;
tdbb->checkCancelState();
}
if (onepage)
if (scope == DPM_next_data_page)
return false;
if (!(ppage = get_pointer_page(tdbb, rpb->rpb_relation, relPages, window,
@ -1862,7 +1858,7 @@ bool DPM_next(thread_db* tdbb, record_param* rpb, USHORT lock_type, bool onepage
}
}
if (onepage)
if (scope == DPM_next_data_page)
{
CCH_RELEASE(tdbb, window);
return false;
@ -1882,11 +1878,8 @@ bool DPM_next(thread_db* tdbb, record_param* rpb, USHORT lock_type, bool onepage
else
CCH_RELEASE(tdbb, window);
if (flags & ppg_eof || onepage)
if ((flags & ppg_eof) || (scope != DPM_next_all))
return false;
if (sweeper)
tdbb->checkCancelState();
}
}
@ -2196,6 +2189,10 @@ RecordNumber DPM_store_blob(thread_db* tdbb, blb* blob, Record* record)
rpb.rpb_relation = blob->blb_relation;
rpb.rpb_transaction_nr = tdbb->getTransaction()->tra_number;
rpb.rpb_flags = rpb_blob;
if (blob->blb_flags & BLB_bulk)
rpb.rpb_stream_flags |= RPB_s_bulk;
blh* header = (blh*) locate_space(tdbb, &rpb, (SSHORT) (BLH_SIZE + length),
stack, record, DPM_other);
@ -3313,14 +3310,20 @@ static rhd* locate_space(thread_db* tdbb,
}
}
if (type == DPM_primary && relPages->rel_last_free_pri_dp)
const bool isBlob = (type == DPM_other) && (rpb->rpb_flags & rpb_blob);
if ((type == DPM_primary) && relPages->rel_last_free_pri_dp ||
isBlob && relPages->rel_last_free_blb_dp)
{
window->win_page = relPages->rel_last_free_pri_dp;
window->win_page = (type == DPM_primary) ? relPages->rel_last_free_pri_dp :
relPages->rel_last_free_blb_dp;
data_page* dpage = (data_page*) CCH_FETCH(tdbb, window, LCK_write, pag_undefined);
const UCHAR wrongFlags = dpg_orphan |
((type == DPM_primary) ? dpg_secondary | dpg_large : 0);
const bool pageOk =
dpage->dpg_header.pag_type == pag_data &&
!(dpage->dpg_header.pag_flags & (dpg_secondary | dpg_large | dpg_orphan)) &&
!(dpage->dpg_header.pag_flags & wrongFlags) &&
dpage->dpg_relation == rpb->rpb_relation->rel_id &&
//dpage->dpg_sequence == dpSequence &&
(dpage->dpg_count > 0);
@ -3334,7 +3337,10 @@ static rhd* locate_space(thread_db* tdbb,
else
CCH_RELEASE(tdbb, window);
relPages->rel_last_free_pri_dp = 0;
if (type == DPM_primary)
relPages->rel_last_free_pri_dp = 0;
else
relPages->rel_last_free_blb_dp = 0;
}
// Look for space anywhere
@ -3347,9 +3353,17 @@ static rhd* locate_space(thread_db* tdbb,
ULONG pp_sequence =
(type == DPM_primary ? relPages->rel_pri_data_space : relPages->rel_sec_data_space);
const bool bulkInsert = (type == DPM_primary || isBlob) && (rpb->rpb_stream_flags & RPB_s_bulk);
for (;; pp_sequence++)
{
locklevel_t ppLock = LCK_read;
// Bulk inserts looks up for empty DP only to avoid contention with
// another attachments doing bulk inserts. Note, DP number is saved in
// relPages->rel_last_free_pri_dp and next insert by same attachment
// will use same DP while concurrent bulk attachments will ignore it as
// non-empty. Take write lock on PP early to clear 'empty' flag.
locklevel_t ppLock = bulkInsert ? LCK_write : LCK_read;
if (type == DPM_primary)
relPages->rel_pri_data_space = pp_sequence;
@ -3390,6 +3404,9 @@ static rhd* locate_space(thread_db* tdbb,
bool dp_is_empty = PPG_DP_BIT_TEST(bits, slot, ppg_dp_empty);
bool dp_is_secondary = PPG_DP_BIT_TEST(bits, slot, ppg_dp_secondary);
if (bulkInsert && !dp_is_empty)
continue;
if (dp_is_empty)
{
if (ppLock == LCK_read)
@ -3437,6 +3454,8 @@ static rhd* locate_space(thread_db* tdbb,
{
if (type == DPM_primary)
relPages->rel_last_free_pri_dp = dp_number;
else if (isBlob)
relPages->rel_last_free_blb_dp = dp_number;
return (rhd*)space;
}
@ -3469,7 +3488,14 @@ static rhd* locate_space(thread_db* tdbb,
space = find_space(tdbb, rpb, size, stack, record, type);
if (space)
{
if (type == DPM_primary)
relPages->rel_last_free_pri_dp = window->win_page.getPageNum();
else if (isBlob)
relPages->rel_last_free_blb_dp = window->win_page.getPageNum();
break;
}
}
if (i == 20)

View File

@ -26,6 +26,7 @@
#include "../jrd/RecordNumber.h"
#include "../jrd/sbm.h"
#include "../jrd/vio_proto.h"
// fwd. decl.
namespace Jrd
@ -68,7 +69,7 @@ void DPM_fetch_fragment(Jrd::thread_db*, Jrd::record_param*, USHORT);
SINT64 DPM_gen_id(Jrd::thread_db*, SLONG, bool, SINT64);
bool DPM_get(Jrd::thread_db*, Jrd::record_param*, SSHORT);
ULONG DPM_get_blob(Jrd::thread_db*, Jrd::blb*, RecordNumber, bool, ULONG);
bool DPM_next(Jrd::thread_db*, Jrd::record_param*, USHORT, bool);
bool DPM_next(Jrd::thread_db*, Jrd::record_param*, USHORT, Jrd::FindNextRecordScope);
void DPM_pages(Jrd::thread_db*, SSHORT, int, ULONG, ULONG);
#ifdef SUPERSERVER_V2
SLONG DPM_prefetch_bitmap(Jrd::thread_db*, Jrd::jrd_rel*, Jrd::PageBitmap*, SLONG);

View File

@ -379,6 +379,7 @@ void EXE_assignment(thread_db* tdbb, const ValueExprNode* to, dsc* from_desc, bo
jrd_rel* relation = nullptr;
Record* record = nullptr;
USHORT fieldId = 0;
bool bulk = false;
if (to)
{
@ -389,12 +390,13 @@ void EXE_assignment(thread_db* tdbb, const ValueExprNode* to, dsc* from_desc, bo
relation = rpb->rpb_relation;
record = rpb->rpb_record;
fieldId = toField->fieldId;
bulk = rpb->rpb_stream_flags & RPB_s_bulk;
}
else if (!(nodeAs<ParameterNode>(to) || nodeAs<VariableNode>(to)))
BUGCHECK(199); // msg 199 expected field node
}
blb::move(tdbb, from_desc, to_desc, relation, record, fieldId);
blb::move(tdbb, from_desc, to_desc, relation, record, fieldId, bulk);
}
else if (!DSC_EQUIV(from_desc, to_desc, false))
{

View File

@ -63,6 +63,8 @@
#include "../jrd/vio_proto.h"
#include "../jrd/tra_proto.h"
#include "../jrd/Collation.h"
#include "../common/Task.h"
#include "../jrd/WorkerAttachment.h"
using namespace Jrd;
using namespace Ods;
@ -228,120 +230,314 @@ bool IDX_check_master_types(thread_db* tdbb, index_desc& idx, jrd_rel* partner_r
}
void IDX_create_index(thread_db* tdbb,
jrd_rel* relation,
index_desc* idx,
const TEXT* index_name,
USHORT* index_id,
jrd_tra* transaction,
SelectivityList& selectivity)
namespace Jrd {
class IndexCreateTask : public Task
{
/**************************************
*
* I D X _ c r e a t e _ i n d e x
*
**************************************
*
* Functional description
* Create and populate index.
*
**************************************/
idx_e result = idx_e_ok;
public:
IndexCreateTask(thread_db* tdbb, MemoryPool* pool, IndexCreation* creation) : Task(),
m_pool(pool),
m_tdbb_flags(tdbb->tdbb_flags),
m_creation(creation),
m_sorts(*m_pool),
m_items(*m_pool),
m_stop(false),
m_countPP(0),
m_nextPP(0)
{
m_dbb = tdbb->getDatabase();
Attachment* att = tdbb->getAttachment();
m_exprBlob.clear();
int workers = 1;
if (att->att_parallel_workers > 0)
workers = att->att_parallel_workers;
for (int i = 0; i < workers; i++)
m_items.add(FB_NEW_POOL(*m_pool) Item(this));
m_items[0]->m_ownAttach = false;
m_items[0]->m_attStable = att->getStable();
m_items[0]->m_tra = m_creation->transaction;
if (m_creation)
{
m_countPP = m_creation->relation->getPages(tdbb)->rel_pages->count();
if ((m_creation->index->idx_flags & idx_expressn) && (workers > 1))
MET_lookup_index_expression_blr(tdbb, m_creation->index_name, m_exprBlob);
}
}
virtual ~IndexCreateTask()
{
for (Item** p = m_items.begin(); p < m_items.end(); p++)
delete *p;
}
bool handler(WorkItem& _item);
bool getWorkItem(WorkItem** pItem);
bool getResult(IStatus* status);
int getMaxWorkers();
class Item : public Task::WorkItem
{
public:
Item(IndexCreateTask* task) : Task::WorkItem(task),
m_inuse(false),
m_ownAttach(true),
m_tra(NULL),
m_sort(NULL),
m_ppSequence(0)
{}
virtual ~Item()
{
if (m_sort)
{
MutexLockGuard guard(getTask()->m_mutex, FB_FUNCTION);
delete m_sort;
m_sort = NULL;
}
if (!m_ownAttach || !m_attStable)
return;
Attachment* att = NULL;
{
AttSyncLockGuard guard(*m_attStable->getSync(), FB_FUNCTION);
att = m_attStable->getHandle();
if (!att)
return;
fb_assert(att->att_use_count > 0);
}
FbLocalStatus status;
if (m_tra)
{
BackgroundContextHolder tdbb(att->att_database, att, &status, FB_FUNCTION);
TRA_commit(tdbb, m_tra, false);
}
WorkerAttachment::releaseAttachment(&status, m_attStable);
}
bool init(thread_db* tdbb)
{
FbStatusVector* status = tdbb->tdbb_status_vector;
Attachment* att = NULL;
if (m_ownAttach && !m_attStable.hasData())
m_attStable = WorkerAttachment::getAttachment(status, getTask()->m_dbb);
if (m_attStable)
att = m_attStable->getHandle();
if (!att)
{
Arg::Gds(isc_bad_db_handle).copyTo(status);
return false;
}
IndexCreation* creation = getTask()->m_creation;
tdbb->setDatabase(att->att_database);
tdbb->setAttachment(att);
if (m_ownAttach && !m_tra)
{
try
{
WorkerContextHolder holder(tdbb, FB_FUNCTION);
m_tra = TRA_start(tdbb, creation->transaction->tra_flags,
creation->transaction->tra_lock_timeout);
}
catch (const Exception& ex)
{
ex.stuffException(tdbb->tdbb_status_vector);
return false;
}
}
tdbb->setTransaction(m_tra);
if (!m_sort)
{
m_idx = *creation->index; // copy
if (m_ownAttach)
{
m_idx.idx_expression = NULL;
m_idx.idx_expression_statement = NULL;
m_idx.idx_foreign_indexes = NULL;
m_idx.idx_foreign_primaries = NULL;
m_idx.idx_foreign_relations = NULL;
}
FPTR_REJECT_DUP_CALLBACK callback = NULL;
void* callback_arg = NULL;
if (m_idx.idx_flags & idx_unique)
{
callback = duplicate_key;
callback_arg = creation;
}
MutexLockGuard guard(getTask()->m_mutex, FB_FUNCTION);
m_sort = FB_NEW_POOL(getTask()->m_sorts.getPool())
Sort(att->att_database, &getTask()->m_sorts,
creation->key_length + sizeof(index_sort_record),
2, 1, creation->key_desc, callback, callback_arg);
creation->sort->addPartition(m_sort);
}
return true;
}
IndexCreateTask* getTask() const
{
return reinterpret_cast<IndexCreateTask*> (m_task);
}
bool m_inuse;
bool m_ownAttach;
RefPtr<StableAttachmentPart> m_attStable;
jrd_tra* m_tra;
index_desc m_idx;
Sort* m_sort;
ULONG m_ppSequence;
};
private:
void setError(IStatus* status, bool stopTask)
{
const bool copyStatus = (m_status.isSuccess() && status && status->getState() == IStatus::STATE_ERRORS);
if (!copyStatus && (!stopTask || m_stop))
return;
MutexLockGuard guard(m_mutex, FB_FUNCTION);
if (m_status.isSuccess() && copyStatus)
m_status.save(status);
if (stopTask)
m_stop = true;
}
MemoryPool* m_pool;
Database* m_dbb;
const ULONG m_tdbb_flags;
IndexCreation* m_creation;
SortOwner m_sorts;
bid m_exprBlob;
Mutex m_mutex;
HalfStaticArray<Item*, 8> m_items;
StatusHolder m_status;
volatile bool m_stop;
ULONG m_countPP;
ULONG m_nextPP;
};
bool IndexCreateTask::handler(WorkItem& _item)
{
Item* item = reinterpret_cast<Item*>(&_item);
ThreadContextHolder tdbb(NULL);
tdbb->tdbb_flags = m_tdbb_flags;
if (!item->init(tdbb))
{
setError(tdbb->tdbb_status_vector, true);
return false;
}
WorkerContextHolder holder(tdbb, FB_FUNCTION);
SET_TDBB(tdbb);
Database* dbb = tdbb->getDatabase();
Jrd::Attachment* attachment = tdbb->getAttachment();
Attachment* attachment = tdbb->getAttachment();
jrd_rel* relation = MET_relation(tdbb, m_creation->relation->rel_id);
if (!(relation->rel_flags & REL_scanned))
MET_scan_relation(tdbb, relation);
if (relation->rel_file)
{
ERR_post(Arg::Gds(isc_no_meta_update) <<
Arg::Gds(isc_extfile_uns_op) << Arg::Str(relation->rel_name));
}
else if (relation->isVirtual())
{
ERR_post(Arg::Gds(isc_no_meta_update) <<
Arg::Gds(isc_wish_list));
}
get_root_page(tdbb, relation);
fb_assert(transaction);
index_desc* idx = &item->m_idx;
jrd_tra* transaction = item->m_tra ? item->m_tra : m_creation->transaction;
Sort* scb = item->m_sort;
idx_e result = idx_e_ok;
RecordStack stack;
record_param primary, secondary;
secondary.rpb_relation = relation;
primary.rpb_relation = relation;
primary.rpb_relation = relation;
primary.rpb_number.setValue(BOF_NUMBER);
//primary.getWindow(tdbb).win_flags = secondary.getWindow(tdbb).win_flags = 0; redundant
const bool isDescending = (idx->idx_flags & idx_descending);
const bool isPrimary = (idx->idx_flags & idx_primary);
const bool isForeign = (idx->idx_flags & idx_foreign);
IndexErrorContext context(relation, idx, m_creation->index_name);
// hvlad: in ODS11 empty string and NULL values can have the same binary
// representation in index keys. BTR can distinguish it by the key_length
// but SORT module currently don't take it into account. Therefore add to
// the index key one byte prefix with 0 for NULL value and 1 for not-NULL
// value to produce right sorting.
// BTR\fast_load will remove this one byte prefix from the index key.
// Note that this is necessary only for single-segment ascending indexes
// and only for ODS11 and higher.
try {
const int nullIndLen = !isDescending && (idx->idx_count == 1) ? 1 : 0;
const USHORT key_length = ROUNDUP(BTR_key_length(tdbb, relation, idx) + nullIndLen, sizeof(SINT64));
if (key_length >= dbb->getMaxIndexKeyLength())
// If scan is finished, do final sort pass over own sort
if (item->m_ppSequence == m_countPP)
{
ERR_post(Arg::Gds(isc_no_meta_update) <<
Arg::Gds(isc_keytoobig) << Arg::Str(index_name));
//fb_assert((scb->scb_flags & scb_sorted) == 0);
if (item->m_ownAttach && idx->idx_expression_statement)
{
idx->idx_expression_statement->release(tdbb);
idx->idx_expression_statement = NULL;
}
if (!m_stop && m_creation->duplicates.value() == 0)
scb->sort(tdbb);
if (!m_stop && m_creation->duplicates.value() > 0)
{
AutoPtr<Record> error_record;
primary.rpb_record = NULL;
fb_assert(m_creation->dup_recno >= 0);
primary.rpb_number.setValue(m_creation->dup_recno);
if (DPM_get(tdbb, &primary, LCK_read))
{
if (primary.rpb_flags & rpb_deleted)
CCH_RELEASE(tdbb, &primary.getWindow(tdbb));
else
{
VIO_data(tdbb, &primary, dbb->dbb_permanent);
error_record = primary.rpb_record;
}
}
context.raise(tdbb, idx_e_duplicate, error_record);
}
return true;
}
IndexCreation creation;
creation.index = idx;
creation.relation = relation;
creation.transaction = transaction;
creation.key_length = key_length;
creation.dup_recno = -1;
creation.duplicates = 0;
BTR_reserve_slot(tdbb, creation);
if (index_id)
*index_id = idx->idx_id;
RecordStack stack;
const UCHAR pad = isDescending ? -1 : 0;
sort_key_def key_desc[2];
// Key sort description
key_desc[0].setSkdLength(SKD_bytes, key_length);
key_desc[0].skd_flags = SKD_ascending;
key_desc[0].setSkdOffset();
key_desc[0].skd_vary_offset = 0;
// RecordNumber sort description
key_desc[1].setSkdLength(SKD_int64, sizeof(RecordNumber));
key_desc[1].skd_flags = SKD_ascending;
key_desc[1].setSkdOffset(key_desc);
key_desc[1].skd_vary_offset = 0;
FPTR_REJECT_DUP_CALLBACK callback = (idx->idx_flags & idx_unique) ? duplicate_key : NULL;
void* callback_arg = (idx->idx_flags & idx_unique) ? &creation : NULL;
Sort* const scb = FB_NEW_POOL(transaction->tra_sorts.getPool())
Sort(dbb, &transaction->tra_sorts, key_length + sizeof(index_sort_record),
2, 1, key_desc, callback, callback_arg);
creation.sort = scb;
jrd_rel* partner_relation = NULL;
jrd_rel* partner_relation = 0;
USHORT partner_index_id = 0;
if (isForeign)
if (idx->idx_flags & idx_foreign)
{
if (!MET_lookup_partner(tdbb, relation, idx, index_name))
BUGCHECK(173); // msg 173 referenced index description not found
// if (!MET_lookup_partner(tdbb, relation, idx, m_creation->index_name)) {
// BUGCHECK(173); // msg 173 referenced index description not found
// }
partner_relation = MET_relation(tdbb, idx->idx_primary_relation);
partner_index_id = idx->idx_primary_index;
}
if ((idx->idx_flags & idx_expressn) && (idx->idx_expression == NULL))
{
fb_assert(!m_exprBlob.isEmpty());
CompilerScratch* csb = NULL;
Jrd::ContextPoolHolder context(tdbb, attachment->createPool());
idx->idx_expression = static_cast<ValueExprNode*> (MET_parse_blob(tdbb, relation, &m_exprBlob,
&csb, &idx->idx_expression_statement, false, false));
delete csb;
}
// Checkout a garbage collect record block for fetching data.
AutoGCRecord gc_record(VIO_gc_record(tdbb, relation));
@ -357,12 +553,29 @@ void IDX_create_index(thread_db* tdbb,
}
}
IndexErrorContext context(relation, idx, index_name);
const bool isDescending = (idx->idx_flags & idx_descending);
const bool isPrimary = (idx->idx_flags & idx_primary);
const bool isForeign = (idx->idx_flags & idx_foreign);
const UCHAR pad = isDescending ? -1 : 0;
bool key_is_null = false;
primary.rpb_number.compose(dbb->dbb_max_records, dbb->dbb_dp_per_pp, 0, 0, item->m_ppSequence);
primary.rpb_number.decrement();
RecordNumber lastRecNo;
lastRecNo.compose(dbb->dbb_max_records, dbb->dbb_dp_per_pp, 0, 0, item->m_ppSequence + 1);
lastRecNo.decrement();
// Loop thru the relation computing index keys. If there are old versions, find them, too.
temporary_key key;
while (DPM_next(tdbb, &primary, LCK_read, false))
while (DPM_next(tdbb, &primary, LCK_read, DPM_next_pointer_page))
{
if (primary.rpb_number >= lastRecNo)
{
CCH_RELEASE(tdbb, &primary.getWindow(tdbb));
break;
}
if (!VIO_garbage_collect(tdbb, &primary, transaction))
continue;
@ -389,7 +602,7 @@ void IDX_create_index(thread_db* tdbb,
secondary.rpb_line = primary.rpb_b_line;
secondary.rpb_prior = primary.rpb_prior;
while (secondary.rpb_page)
while (!m_stop && secondary.rpb_page)
{
if (!DPM_fetch(tdbb, &secondary, LCK_read))
break; // must be garbage collected
@ -401,7 +614,7 @@ void IDX_create_index(thread_db* tdbb,
secondary.rpb_line = secondary.rpb_b_line;
}
while (stack.hasData())
while (!m_stop && stack.hasData())
{
Record* record = stack.pop();
@ -444,7 +657,7 @@ void IDX_create_index(thread_db* tdbb,
context.raise(tdbb, result, record);
}
if (key.key_length > key_length)
if (key.key_length > m_creation->key_length)
{
do {
if (record != gc_record)
@ -462,7 +675,7 @@ void IDX_create_index(thread_db* tdbb,
// try to catch duplicates early
if (creation.duplicates > 0)
if (m_creation->duplicates.value() > 0)
{
do {
if (record != gc_record)
@ -472,7 +685,7 @@ void IDX_create_index(thread_db* tdbb,
break;
}
if (nullIndLen)
if (m_creation->nullIndLen)
*p++ = (key.key_length == 0) ? 0 : 1;
if (key.key_length > 0)
@ -481,7 +694,7 @@ void IDX_create_index(thread_db* tdbb,
p += key.key_length;
}
int l = int(key_length) - nullIndLen - key.key_length; // must be signed
int l = int(m_creation->key_length) - m_creation->nullIndLen - key.key_length; // must be signed
if (l > 0)
{
@ -499,7 +712,10 @@ void IDX_create_index(thread_db* tdbb,
delete record;
}
if (creation.duplicates > 0)
if (m_stop)
break;
if (m_creation->duplicates.value() > 0)
break;
JRD_reschedule(tdbb);
@ -509,18 +725,207 @@ void IDX_create_index(thread_db* tdbb,
if (primary.getWindow(tdbb).win_flags & WIN_large_scan)
--relation->rel_scan_count;
}
catch (const Exception& ex)
{
ex.stuffException(tdbb->tdbb_status_vector);
setError(tdbb->tdbb_status_vector, true);
return false;
}
if (!creation.duplicates)
scb->sort(tdbb);
return true;
}
// ASF: We have a callback accessing "creation", so don't join above and below if's.
bool IndexCreateTask::getWorkItem(WorkItem** pItem)
{
Item* item = reinterpret_cast<Item*> (*pItem);
if (!creation.duplicates)
MutexLockGuard guard(m_mutex, FB_FUNCTION);
if (m_stop)
return false;
if (item == NULL)
{
for (Item** p = m_items.begin(); p < m_items.end(); p++)
if (!(*p)->m_inuse)
{
(*p)->m_inuse = true;
*pItem = item = *p;
break;
}
}
if (!item)
return false;
item->m_inuse = (m_nextPP < m_countPP) ||
(item->m_sort && item->m_sort->isSorted()) == 0;
if (item->m_inuse)
{
item->m_ppSequence = m_nextPP;
if (m_nextPP < m_countPP)
m_nextPP += 1;
}
return item->m_inuse;
}
bool IndexCreateTask::getResult(IStatus* status)
{
if (status)
{
status->init();
status->setErrors(m_status.getErrors());
}
return m_status.isSuccess();
}
int IndexCreateTask::getMaxWorkers()
{
const int parWorkers = m_items.getCount();
if (parWorkers == 1 || m_countPP == 0)
return 1;
fb_assert(m_creation != NULL);
if (!m_creation || m_creation->relation->isTemporary())
return 1;
return MIN(parWorkers, m_countPP);
}
}; // namespace Jrd
void IDX_create_index(thread_db* tdbb,
jrd_rel* relation,
index_desc* idx,
const TEXT* index_name,
USHORT* index_id,
jrd_tra* transaction,
SelectivityList& selectivity)
{
/**************************************
*
* I D X _ c r e a t e _ i n d e x
*
**************************************
*
* Functional description
* Create and populate index.
*
**************************************/
idx_e result = idx_e_ok;
SET_TDBB(tdbb);
Database* dbb = tdbb->getDatabase();
Jrd::Attachment* attachment = tdbb->getAttachment();
if (relation->rel_file)
{
ERR_post(Arg::Gds(isc_no_meta_update) <<
Arg::Gds(isc_extfile_uns_op) << Arg::Str(relation->rel_name));
}
else if (relation->isVirtual())
{
ERR_post(Arg::Gds(isc_no_meta_update) <<
Arg::Gds(isc_wish_list));
}
get_root_page(tdbb, relation);
fb_assert(transaction);
const bool isDescending = (idx->idx_flags & idx_descending);
const bool isPrimary = (idx->idx_flags & idx_primary);
const bool isForeign = (idx->idx_flags & idx_foreign);
// hvlad: in ODS11 empty string and NULL values can have the same binary
// representation in index keys. BTR can distinguish it by the key_length
// but SORT module currently don't take it into account. Therefore add to
// the index key one byte prefix with 0 for NULL value and 1 for not-NULL
// value to produce right sorting.
// BTR\fast_load will remove this one byte prefix from the index key.
// Note that this is necessary only for single-segment ascending indexes
// and only for ODS11 and higher.
const int nullIndLen = !isDescending && (idx->idx_count == 1) ? 1 : 0;
const USHORT key_length = ROUNDUP(BTR_key_length(tdbb, relation, idx) + nullIndLen, sizeof(SINT64));
if (key_length >= dbb->getMaxIndexKeyLength())
{
ERR_post(Arg::Gds(isc_no_meta_update) <<
Arg::Gds(isc_keytoobig) << Arg::Str(index_name));
}
if (isForeign)
{
if (!MET_lookup_partner(tdbb, relation, idx, index_name)) {
BUGCHECK(173); // msg 173 referenced index description not found
}
}
IndexCreation creation;
creation.index = idx;
creation.index_name = index_name;
creation.relation = relation;
creation.transaction = transaction;
creation.sort = NULL;
creation.key_length = key_length;
creation.nullIndLen = nullIndLen;
creation.dup_recno = -1;
creation.duplicates.setValue(0);
BTR_reserve_slot(tdbb, creation);
if (index_id)
*index_id = idx->idx_id;
sort_key_def key_desc[2];
// Key sort description
key_desc[0].setSkdLength(SKD_bytes, key_length);
key_desc[0].skd_flags = SKD_ascending;
key_desc[0].setSkdOffset();
key_desc[0].skd_vary_offset = 0;
// RecordNumber sort description
key_desc[1].setSkdLength(SKD_int64, sizeof(RecordNumber));
key_desc[1].skd_flags = SKD_ascending;
key_desc[1].setSkdOffset(key_desc);
key_desc[1].skd_vary_offset = 0;
creation.key_desc = key_desc;
PartitionedSort sort(dbb, &transaction->tra_sorts);
creation.sort = &sort;
Coordinator coord(dbb->dbb_permanent);
IndexCreateTask task(tdbb, dbb->dbb_permanent, &creation);
{
EngineCheckout cout(tdbb, FB_FUNCTION);
FbLocalStatus local_status;
fb_utils::init_status(&local_status);
coord.runSync(&task);
if (!task.getResult(&local_status))
local_status.raise();
}
sort.buidMergeTree();
if (creation.duplicates.value() == 0)
BTR_create(tdbb, creation, selectivity);
if (creation.duplicates > 0)
if (creation.duplicates.value() > 0)
{
AutoPtr<Record> error_record;
record_param primary;
primary.rpb_relation = relation;
primary.rpb_record = NULL;
fb_assert(creation.dup_recno >= 0);
primary.rpb_number.setValue(creation.dup_recno);
@ -537,6 +942,7 @@ void IDX_create_index(thread_db* tdbb,
}
IndexErrorContext context(relation, idx, index_name);
context.raise(tdbb, idx_e_duplicate, error_record);
}
@ -1519,7 +1925,7 @@ static bool duplicate_key(const UCHAR* record1, const UCHAR* record2, void* ifl_
if (!(rec1->isr_flags & (ISR_secondary | ISR_null)) &&
!(rec2->isr_flags & (ISR_secondary | ISR_null)))
{
if (!ifl_data->duplicates++)
if (ifl_data->duplicates.exchangeAdd(1) == 0)
ifl_data->dup_recno = rec2->isr_record_number;
}

View File

@ -84,6 +84,7 @@ enum irq_type_t
irq_c_exp_index, // create expression index
irq_l_exp_index, // lookup expression index
irq_l_exp_index_blr, // lookup expression index BLR
irq_l_rel_id, // lookup relation id
irq_l_procedure, // lookup procedure name

View File

@ -113,6 +113,7 @@
#include "../jrd/ThreadCollect.h"
#include "../jrd/Database.h"
#include "../jrd/WorkerAttachment.h"
#include "../common/config/config.h"
#include "../common/config/dir_list.h"
@ -1094,6 +1095,8 @@ namespace Jrd
bool dpb_reset_icu;
bool dpb_map_attach;
ULONG dpb_remote_flags;
SSHORT dpb_parallel_workers;
bool dpb_worker_attach;
ReplicaMode dpb_replica_mode;
bool dpb_set_db_replica;
bool dpb_clear_map;
@ -2157,6 +2160,11 @@ JAttachment* JProvider::internalAttach(CheckStatusWrapper* user_status, const ch
}
}
if (options.dpb_parallel_workers)
{
attachment->att_parallel_workers = options.dpb_parallel_workers;
}
if (options.dpb_set_db_readonly)
{
validateAccess(tdbb, attachment, CHANGE_HEADER_SETTINGS);
@ -2250,6 +2258,11 @@ JAttachment* JProvider::internalAttach(CheckStatusWrapper* user_status, const ch
}
}
if (options.dpb_worker_attach)
attachment->att_flags |= ATT_worker;
else
WorkerAttachment::incUserAtts(dbb->dbb_filename);
jAtt->getStable()->manualUnlock(attachment->att_flags);
return jAtt;
@ -3107,6 +3120,11 @@ JAttachment* JProvider::createDatabase(CheckStatusWrapper* user_status, const ch
CCH_init2(tdbb);
VIO_init(tdbb);
if (options.dpb_parallel_workers)
{
attachment->att_parallel_workers = options.dpb_parallel_workers;
}
if (options.dpb_set_db_readonly)
{
if (!CCH_exclusive(tdbb, LCK_EX, WAIT_PERIOD, &dbbGuard))
@ -3176,6 +3194,8 @@ JAttachment* JProvider::createDatabase(CheckStatusWrapper* user_status, const ch
attachment->att_trace_manager->event_attach(&conn, true, ITracePlugin::RESULT_SUCCESS);
}
WorkerAttachment::incUserAtts(dbb->dbb_filename);
jAtt->getStable()->manualUnlock(attachment->att_flags);
return jAtt;
@ -3429,6 +3449,9 @@ void JAttachment::internalDropDatabase(CheckStatusWrapper* user_status)
Arg::Gds(isc_obj_in_use) << Arg::Str(file_name));
}
if (!(attachment->att_flags & ATT_worker))
WorkerAttachment::decUserAtts(dbb->dbb_filename);
// Lock header page before taking database lock
header = (Ods::header_page*) CCH_FETCH(tdbb, &window, LCK_write, pag_header);
@ -4564,7 +4587,7 @@ void JProvider::shutdown(CheckStatusWrapper* status, unsigned int timeout, const
}
ThreadContextHolder tdbb;
WorkerAttachment::shutdown();
EDS::Manager::shutdown();
ULONG attach_count, database_count, svc_count;
@ -6815,6 +6838,7 @@ void DatabaseOptions::get(const UCHAR* dpb, USHORT dpb_length, bool& invalid_cli
dpb_overwrite = false;
dpb_sql_dialect = 99;
invalid_client_SQL_dialect = false;
dpb_parallel_workers = Config::getParallelWorkers();
if (dpb_length == 0)
return;
@ -7197,6 +7221,23 @@ void DatabaseOptions::get(const UCHAR* dpb, USHORT dpb_length, bool& invalid_cli
dpb_clear_map = rdr.getBoolean();
break;
case isc_dpb_parallel_workers:
dpb_parallel_workers = (SSHORT) rdr.getInt();
if (dpb_parallel_workers > Config::getMaxParallelWorkers() ||
dpb_parallel_workers < 0)
{
string str;
str.printf("Wrong parallel workers value %i, valid range are from 1 to %i",
dpb_parallel_workers, Config::getMaxParallelWorkers());
ERR_post(Arg::Gds(isc_bad_dpb_content) << Arg::Gds(isc_random) << Arg::Str(str));
}
break;
case isc_dpb_worker_attach:
dpb_worker_attach = true;
break;
default:
break;
}
@ -7204,6 +7245,12 @@ void DatabaseOptions::get(const UCHAR* dpb, USHORT dpb_length, bool& invalid_cli
if (! rdr.isEof())
ERR_post(Arg::Gds(isc_bad_dpb_form));
if (dpb_worker_attach)
{
dpb_parallel_workers = 1;
dpb_no_db_triggers = true;
}
}
@ -7870,6 +7917,8 @@ bool JRD_shutdown_database(Database* dbb, const unsigned flags)
fb_assert(!dbb->locked());
WorkerAttachment::shutdownDbb(dbb);
try
{
#ifdef SUPERSERVER_V2
@ -8338,6 +8387,9 @@ static void purge_attachment(thread_db* tdbb, StableAttachmentPart* sAtt, unsign
if (attachment->att_flags & ATT_overwrite_check)
shutdownFlags |= SHUT_DBB_OVERWRITE_CHECK;
if (!(attachment->att_flags & ATT_worker))
WorkerAttachment::decUserAtts(dbb->dbb_filename);
// Unlink attachment from database
release_attachment(tdbb, attachment);

View File

@ -2655,6 +2655,27 @@ void MET_lookup_index_expression(thread_db* tdbb, jrd_rel* relation, index_desc*
}
// Look up the expression BLR of the named index in RDB$INDICES.
// Returns true (and fills blob_id) when the index row exists and its
// RDB$EXPRESSION_BLR is non-NULL, i.e. it is an expression index;
// returns false otherwise. blob_id is assigned even when the field is
// NULL — callers must check the return value before using it.
bool MET_lookup_index_expression_blr(thread_db* tdbb, const MetaName& index_name, bid& blob_id)
{
	SET_TDBB(tdbb);
	Attachment* attachment = tdbb->getAttachment();	// NOTE(review): not referenced below; possibly used by GPRE expansion — confirm

	bool found = false;

	// Cached compiled system request, keyed by irq_l_exp_index_blr.
	AutoCacheRequest request(tdbb, irq_l_exp_index_blr, IRQ_REQUESTS);

	FOR(REQUEST_HANDLE request)
		IDX IN RDB$INDICES WITH
			IDX.RDB$INDEX_NAME EQ index_name.c_str()
	{
		found = !IDX.RDB$EXPRESSION_BLR.NULL;
		blob_id = IDX.RDB$EXPRESSION_BLR;
	}
	END_FOR;

	return found;
}
bool MET_lookup_partner(thread_db* tdbb, jrd_rel* relation, index_desc* idx, const TEXT* index_name)
{
/**************************************

View File

@ -108,6 +108,7 @@ bool MET_lookup_generator_id(Jrd::thread_db*, SLONG, Jrd::MetaName&, bool* sysG
void MET_update_generator_increment(Jrd::thread_db* tdbb, SLONG gen_id, SLONG step);
void MET_lookup_index(Jrd::thread_db*, Jrd::MetaName&, const Jrd::MetaName&, USHORT);
void MET_lookup_index_expression(Jrd::thread_db*, Jrd::jrd_rel*, Jrd::index_desc*);
bool MET_lookup_index_expression_blr(Jrd::thread_db* tdbb, const Jrd::MetaName& index_name, Jrd::bid& blob_id);
SLONG MET_lookup_index_name(Jrd::thread_db*, const Jrd::MetaName&, SLONG*, Jrd::IndexStatus* status);
bool MET_lookup_partner(Jrd::thread_db*, Jrd::jrd_rel*, struct Jrd::index_desc*, const TEXT*);
Jrd::jrd_prc* MET_lookup_procedure(Jrd::thread_db*, const Jrd::QualifiedName&, bool);

View File

@ -146,7 +146,7 @@ bool FullTableScan::getRecord(thread_db* tdbb) const
return false;
}
if (VIO_next_record(tdbb, rpb, request->req_transaction, request->req_pool, false))
if (VIO_next_record(tdbb, rpb, request->req_transaction, request->req_pool, DPM_next_all))
{
if (impure->irsb_upper.isValid() && rpb->rpb_number > impure->irsb_upper)
{

View File

@ -1092,7 +1092,7 @@ bool Applier::lookupRecord(thread_db* tdbb,
rpb.rpb_relation = relation;
rpb.rpb_number.setValue(BOF_NUMBER);
while (VIO_next_record(tdbb, &rpb, transaction, m_request->req_pool, false))
while (VIO_next_record(tdbb, &rpb, transaction, m_request->req_pool, DPM_next_all))
{
const auto seq_record = rpb.rpb_record;
fb_assert(seq_record);

View File

@ -125,6 +125,7 @@ const USHORT RPB_s_update = 0x01; // input stream fetched for update
const USHORT RPB_s_no_data = 0x02; // nobody is going to access the data
const USHORT RPB_s_sweeper = 0x04; // garbage collector - skip swept pages
const USHORT RPB_s_unstable = 0x08; // don't use undo log, used with unstable explicit cursors
const USHORT RPB_s_bulk = 0x10; // bulk operation (currently insert only)
// Runtime flags

View File

@ -315,27 +315,7 @@ void Sort::get(thread_db* tdbb, ULONG** record_address)
try
{
// If there weren't any runs, everything fit in memory. Just return stuff.
if (!m_merge)
{
while (true)
{
if (m_records == 0)
{
record = NULL;
break;
}
m_records--;
if ( (record = *m_next_pointer++) )
break;
}
}
else
{
record = getMerge(m_merge);
}
record = getRecord();
*record_address = (ULONG*) record;
if (record)
@ -1386,6 +1366,33 @@ sort_record* Sort::getMerge(merge_control* merge)
}
sort_record* Sort::getRecord()
{
	// Runs were spilled to disk: delegate to the merge machinery.
	if (m_merge)
		return getMerge(m_merge);

	// Everything fit in memory: walk the pointer array, skipping slots
	// that were NULLed out (e.g. by duplicate elimination).
	while (m_records != 0)
	{
		m_records--;

		sort_record* const rec = *m_next_pointer++;
		if (rec)
			return rec;
	}

	return NULL;
}
void Sort::init()
{
/**************************************
@ -2172,3 +2179,236 @@ void Sort::sortRunsBySeek(int n)
}
run->run_next = tail;
}
/// class PartitionedSort
// Construct an empty partitioned sort. Partitions are registered later
// via addPartition() and the merge tree is built by buidMergeTree().
// All internal arrays allocate from the owner's pool.
PartitionedSort::PartitionedSort(Database* dbb, SortOwner* owner) :
	m_owner(owner),
	m_parts(owner->getPool()),
	m_nodes(owner->getPool()),
	m_merge(NULL)
{
	// NOTE(review): the dbb parameter is currently unused in this body.
}
PartitionedSort::~PartitionedSort()
{
	// The partition Sort objects are deliberately not deleted here — the
	// disabled loop below suggests they are owned elsewhere (presumably by
	// the SortOwner they were registered with; confirm against callers).
//	for (ULONG p = 0; p < m_parts.getCount(); p++)
//		delete m_parts[p].srt_sort;
}
// Build a binary merge tree over all registered partitions: leaves are the
// partitions' sort_control headers, internal nodes are merge_control blocks
// taken from m_nodes. After this call m_merge points at the root, or stays
// NULL when there is a single partition (no merging needed).
// NOTE(review): "buid" is a typo for "build"; the name is kept because
// callers reference it by this spelling.
void PartitionedSort::buidMergeTree()
{
	ULONG count = m_parts.getCount();
	if (count <= 0)
		return;

	MemoryPool& pool = m_owner->getPool();
	HalfStaticArray<run_merge_hdr*, 8> streams(pool);

	// Start with one leaf stream per partition.
	run_merge_hdr** m1 = streams.getBuffer(count);
	for (sort_control* sort = m_parts.begin(); sort < m_parts.end(); sort++)
		*m1++ = &sort->srt_header;

	// N leaf streams need at most N - 1 internal merge nodes.
	merge_control* node = m_nodes.getBuffer(count - 1);
	while (count > 1)
	{
		run_merge_hdr** m2 = m1 = streams.begin();

		// "m1" is used to sequence through the runs being merged,
		// while "m2" points at the new merged run

		// Pair up streams two at a time into fresh merge nodes.
		while (count >= 2)
		{
			m_merge = node++;
			m_merge->mrg_header.rmh_type = RMH_TYPE_MRG;

			// garbage watch
			fb_assert(((*m1)->rmh_type == RMH_TYPE_MRG) || ((*m1)->rmh_type == RMH_TYPE_SORT));

			(*m1)->rmh_parent = m_merge;
			m_merge->mrg_stream_a = *m1++;

			// garbage watch
			fb_assert(((*m1)->rmh_type == RMH_TYPE_MRG) || ((*m1)->rmh_type == RMH_TYPE_SORT));

			(*m1)->rmh_parent = m_merge;
			m_merge->mrg_stream_b = *m1++;

			m_merge->mrg_record_a = NULL;
			m_merge->mrg_record_b = NULL;

			*m2++ = (run_merge_hdr*)m_merge;
			count -= 2;
		}

		// An odd stream is carried over unchanged into the next level.
		if (count)
			*m2++ = *m1++;

		count = m2 - streams.begin();
	}

	// The last node created is the root of the tree.
	if (m_merge)
		m_merge->mrg_header.rmh_parent = NULL;
}
void PartitionedSort::get(thread_db* tdbb, ULONG** record_address)
{
	// With a merge tree present, pull the next record from it; otherwise
	// there is a single partition and we can read it directly.
	sort_record* const next = m_merge ? getMerge() : m_parts[0].srt_sort->getRecord();

	*record_address = (ULONG*) next;

	// Restore the key bytes to their external representation before
	// handing the record to the caller.
	if (next)
		m_parts[0].srt_sort->diddleKey((UCHAR*) next->sort_record_key, false, true);
}
// Pull the next record, in key order, out of the binary merge tree built by
// buidMergeTree(). Leaf nodes (RMH_TYPE_SORT) produce records from their
// partition's Sort object; internal nodes (RMH_TYPE_MRG) hold one candidate
// record per input stream and emit the smaller one. Returns NULL when all
// partitions are exhausted.
sort_record* PartitionedSort::getMerge()
{
	// All partitions share the same key layout and duplicate callback,
	// so the first one is used for key handling throughout.
	Sort* aSort = m_parts[0].srt_sort;
	merge_control* merge = m_merge;
	sort_record* record = NULL;
	bool eof = false;

	while (merge)
	{
		// If node is a leaf (sort_control), get the next record (or not)
		// and go back to the parent.
		if (merge->mrg_header.rmh_type == RMH_TYPE_SORT)
		{
			sort_control* sort = (sort_control*) merge;
			merge = sort->srt_header.rmh_parent;

			// check for end-of-file condition in either direction
			record = sort->srt_sort->getRecord();
			if (!record)
			{
				record = (sort_record*) -1;	// sentinel: "this stream is dry"
				eof = true;
				continue;
			}

			eof = false;
			continue;
		}

		// If we've got a record, somebody asked for it. Find out who.
		if (record)
		{
			if (merge->mrg_stream_a && !merge->mrg_record_a)
			{
				if (eof)
					merge->mrg_stream_a = NULL;
				else
					merge->mrg_record_a = record;
			}
			else if (eof)
				merge->mrg_stream_b = NULL;
			else
				merge->mrg_record_b = record;
		}

		// If either stream needs a record and is still active, loop back to
		// pick up the record. If either stream is dry, return the record of
		// the other. If both are dry, indicate eof for this stream.
		record = NULL;
		eof = false;

		if (!merge->mrg_record_a && merge->mrg_stream_a)
		{
			merge = (merge_control*) merge->mrg_stream_a;
			continue;
		}

		if (!merge->mrg_record_b)
		{
			if (merge->mrg_stream_b) {
				merge = (merge_control*) merge->mrg_stream_b;
			}
			else if ((record = merge->mrg_record_a))
			{
				merge->mrg_record_a = NULL;
				merge = merge->mrg_header.rmh_parent;
			}
			else
			{
				eof = true;
				record = (sort_record*) -1;
				merge = merge->mrg_header.rmh_parent;
			}

			continue;
		}

		if (!merge->mrg_record_a)
		{
			record = merge->mrg_record_b;
			merge->mrg_record_b = NULL;
			merge = merge->mrg_header.rmh_parent;
			continue;
		}

		// We have prospective records from each of the sub-streams. Compare them.
		// If equal, offer each to user routine for possible sacrifice.
		SORTP* p = merge->mrg_record_a->sort_record_key;
		SORTP* q = merge->mrg_record_b->sort_record_key;
		ULONG l = aSort->m_unique_length;

		DO_32_COMPARE(p, q, l);

		if (l == 0 && aSort->m_dup_callback)
		{
			UCHAR* rec_a = (UCHAR*) merge->mrg_record_a;
			// BUGFIX: rec_b previously aliased mrg_record_a (copy-paste),
			// so record B's key was never un-diddled before, nor re-diddled
			// after, the duplicate callback — corrupting its key bytes.
			UCHAR* rec_b = (UCHAR*) merge->mrg_record_b;

			// Keys are stored "diddled" for byte-wise comparison; restore
			// them before handing the records to the duplicate callback.
			aSort->diddleKey(rec_a, false, true);
			aSort->diddleKey(rec_b, false, true);

			if ((*aSort->m_dup_callback) ((const UCHAR*) merge->mrg_record_a,
										  (const UCHAR*) merge->mrg_record_b,
										  aSort->m_dup_callback_arg))
			{
				// Record A was sacrificed as a duplicate; put B's key back
				// into comparison form and retry this node.
				merge->mrg_record_a = NULL;
				aSort->diddleKey(rec_b, true, true);
				continue;
			}

			aSort->diddleKey(rec_a, true, true);
			aSort->diddleKey(rec_b, true, true);
		}

		// Keys compared equal on the unique prefix: break the tie with the
		// remainder of the key (e.g. the record-number part).
		if (l == 0)
		{
			l = aSort->m_key_length - aSort->m_unique_length;
			if (l != 0)
				DO_32_COMPARE(p, q, l);
		}

		// Emit the smaller record and pass it up to the parent node.
		if (p[-1] < q[-1])
		{
			record = merge->mrg_record_a;
			merge->mrg_record_a = NULL;
		}
		else
		{
			record = merge->mrg_record_b;
			merge->mrg_record_b = NULL;
		}

		merge = merge->mrg_header.rmh_parent;
	}

	// Merge pointer is null; we're done. Return either the most
	// recent record, or end of file, as appropriate.
	return eof ? NULL : record;
}

View File

@ -33,6 +33,7 @@ namespace Jrd {
// Forward declaration
class Attachment;
class Sort;
class SortOwner;
struct merge_control;
@ -221,6 +222,7 @@ struct run_merge_hdr
const int RMH_TYPE_RUN = 0;
const int RMH_TYPE_MRG = 1;
const int RMH_TYPE_SORT = 2;
// Run control block
@ -253,13 +255,27 @@ struct merge_control
run_merge_hdr* mrg_stream_b;
};
// Sort control block, for partitioned sort
struct sort_control
{
run_merge_hdr srt_header;
Sort* srt_sort;
};
// Sort class
typedef bool (*FPTR_REJECT_DUP_CALLBACK)(const UCHAR*, const UCHAR*, void*);
// flags as set in m_flags
const int scb_sorted = 1; // stream has been sorted
const int scb_reuse_buffer = 2; // reuse buffer if possible
class Sort
{
friend class PartitionedSort;
public:
Sort(Database*, SortOwner*,
ULONG, FB_SIZE_T, FB_SIZE_T, const sort_key_def*,
@ -270,6 +286,11 @@ public:
void put(Jrd::thread_db*, ULONG**);
void sort(Jrd::thread_db*);
bool isSorted() const
{
return m_flags & scb_sorted;
}
static FB_UINT64 readBlock(TempSpace* space, FB_UINT64 seek, UCHAR* address, ULONG length)
{
const size_t bytes = space->read(seek, address, length);
@ -290,6 +311,7 @@ private:
void diddleKey(UCHAR*, bool, bool);
sort_record* getMerge(merge_control*);
sort_record* getRecord();
ULONG allocate(ULONG, ULONG, bool);
void init();
void mergeRuns(USHORT);
@ -333,10 +355,36 @@ private:
Firebird::Array<sort_key_def> m_description;
};
// flags as set in m_flags
const int scb_sorted = 1; // stream has been sorted
const int scb_reuse_buffer = 2; // reuse buffer if possible
// PartitionedSort merges the ordered output of several Sort objects
// (typically one per parallel worker) into a single ordered record stream.
// Usage: addPartition() for each sorted partition, buidMergeTree() once,
// then repeated get() calls until a NULL record is returned.
class PartitionedSort
{
public:
	PartitionedSort(Database*, SortOwner*);
	~PartitionedSort();

	// Fetch the next record of the merged stream (NULL pointer when done).
	void get(Jrd::thread_db*, ULONG**);

	// Register one fully populated Sort as a leaf stream of the merge.
	void addPartition(Sort* sort)
	{
		sort_control item;
		item.srt_header.rmh_type = RMH_TYPE_SORT;
		item.srt_header.rmh_parent = NULL;
		item.srt_sort = sort;
		m_parts.add(item);
	}

	// Build the binary merge tree over all added partitions.
	// NOTE(review): "buid" is a typo for "build", kept for compatibility.
	void buidMergeTree();

private:
	sort_record* getMerge();

	SortOwner* m_owner;
	Firebird::HalfStaticArray<sort_control, 8> m_parts;		// leaf partitions
	Firebird::HalfStaticArray<merge_control, 8> m_nodes;	// nodes of merge tree
	merge_control* m_merge;		// root of merge tree, NULL if single partition
};
class SortOwner
{

View File

@ -2908,6 +2908,7 @@ bool Service::process_switches(ClumpletReader& spb, string& switches)
get_action_svc_data(spb, burp_database, bigint);
break;
case isc_spb_bkp_factor:
case isc_spb_bkp_parallel_workers:
case isc_spb_res_buffers:
case isc_spb_res_page_size:
case isc_spb_verbint:
@ -2991,6 +2992,7 @@ bool Service::process_switches(ClumpletReader& spb, string& switches)
case isc_spb_rpr_commit_trans:
case isc_spb_rpr_rollback_trans:
case isc_spb_rpr_recover_two_phase:
case isc_spb_rpr_par_workers:
if (!get_action_svc_parameter(spb.getClumpTag(), alice_in_sw_table, switches))
{
return false;

View File

@ -4167,13 +4167,12 @@ TraceSweepEvent::TraceSweepEvent(thread_db* tdbb)
TraceManager* trace_mgr = att->att_trace_manager;
m_start_clock = fb_utils::query_performance_counter();
m_need_trace = trace_mgr->needs(ITraceFactory::TRACE_EVENT_SWEEP);
if (!m_need_trace)
return;
m_start_clock = fb_utils::query_performance_counter();
TraceConnectionImpl conn(att);
trace_mgr->event_sweep(&conn, &m_sweep_info, ITracePlugin::SWEEP_STATE_STARTED);
}
@ -4242,12 +4241,19 @@ void TraceSweepEvent::report(ntrace_process_state_t state)
{
Attachment* att = m_tdbb->getAttachment();
const SINT64 finiTime = fb_utils::query_performance_counter() - m_start_clock;
if (state == ITracePlugin::SWEEP_STATE_FINISHED)
{
const SINT64 timeMs = finiTime * 1000 / fb_utils::query_performance_frequency();
gds__log("Sweep is finished\n"
"\tDatabase \"%s\" \n"
"\t%i workers, time %" SLONGFORMAT ".%03d sec \n"
"\tOIT %" SQUADFORMAT", OAT %" SQUADFORMAT", OST %" SQUADFORMAT", Next %" SQUADFORMAT,
att->att_filename.c_str(),
att->att_parallel_workers,
(int) timeMs / 1000, (unsigned int) timeMs % 1000,
m_sweep_info.getOIT(),
m_sweep_info.getOAT(),
m_sweep_info.getOST(),
@ -4268,9 +4274,7 @@ void TraceSweepEvent::report(ntrace_process_state_t state)
jrd_tra* tran = m_tdbb->getTransaction();
TraceRuntimeStats stats(att, &m_base_stats, &att->att_stats,
fb_utils::query_performance_counter() - m_start_clock,
0);
TraceRuntimeStats stats(att, &m_base_stats, &att->att_stats, finiTime, 0);
m_sweep_info.setPerf(stats.getPerf());
trace_mgr->event_sweep(&conn, &m_sweep_info, state);

View File

@ -89,6 +89,8 @@
#include "../jrd/GarbageCollector.h"
#include "../jrd/trace/TraceManager.h"
#include "../jrd/trace/TraceJrdHelpers.h"
#include "../common/Task.h"
#include "../jrd/WorkerAttachment.h"
using namespace Jrd;
using namespace Firebird;
@ -175,6 +177,394 @@ static bool set_security_class(thread_db*, Record*, USHORT);
static void set_system_flag(thread_db*, Record*, USHORT);
static void verb_post(thread_db*, jrd_tra*, record_param*, Record*);
namespace Jrd
{
// SweepTask distributes a database sweep across parallel workers.
// Work is split by relation and, within a relation, by pointer page (PP)
// ranges. Item 0 reuses the caller's attachment/transaction; every other
// Item creates its own worker attachment and a read-committed transaction.
class SweepTask : public Task
{
	struct RelInfo; // forward decl

public:
	SweepTask(thread_db* tdbb, MemoryPool* pool, TraceSweepEvent* traceSweep) : Task(),
		m_pool(pool),
		m_dbb(NULL),
		m_trace(traceSweep),
		m_items(*m_pool),
		m_stop(false),
		m_nextRelID(0),
		m_lastRelID(0),
		m_relInfo(*m_pool)
	{
		m_dbb = tdbb->getDatabase();
		Attachment* att = tdbb->getAttachment();

		// One work item per allowed parallel worker (at least one).
		int workers = 1;
		if (att->att_parallel_workers > 0)
			workers = att->att_parallel_workers;

		for (int i = 0; i < workers; i++)
			m_items.add(FB_NEW_POOL(*m_pool) Item(this));

		// The first item runs inside the caller's own attachment and
		// transaction instead of creating a worker attachment.
		m_items[0]->m_ownAttach = false;
		m_items[0]->m_attStable = att->getStable();
		m_items[0]->m_tra = tdbb->getTransaction();

		m_relInfo.grow(m_items.getCount());

		// Sweep relations with IDs in [m_nextRelID, m_lastRelID).
		m_lastRelID = att->att_relations->count();
	};

	virtual ~SweepTask()
	{
		for (Item** p = m_items.begin(); p < m_items.end(); p++)
			delete *p;
	};

	// Per-worker state: attachment, transaction and the currently
	// assigned slice of a relation (PP range).
	class Item : public Task::WorkItem
	{
	public:
		Item(SweepTask* task) : Task::WorkItem(task),
			m_inuse(false),
			m_ownAttach(true),
			m_tra(NULL),
			m_relInfo(NULL),
			m_firstPP(0),
			m_lastPP(0)
		{}

		// Commits the worker transaction and releases the worker
		// attachment — but only for items that own their attachment.
		virtual ~Item()
		{
			if (!m_ownAttach || !m_attStable)
				return;

			Attachment* att = NULL;
			{
				AttSyncLockGuard guard(*m_attStable->getSync(), FB_FUNCTION);
				att = m_attStable->getHandle();
				if (!att)
					return;
				fb_assert(att->att_use_count > 0);
			}

			FbLocalStatus status;
			if (m_tra)
			{
				BackgroundContextHolder tdbb(att->att_database, att, &status, FB_FUNCTION);
				TRA_commit(tdbb, m_tra, false);
			}
			WorkerAttachment::releaseAttachment(&status, m_attStable);
		}

		SweepTask* getSweepTask() const
		{
			return reinterpret_cast<SweepTask*> (m_task);
		}

		// Bind this worker's attachment, transaction and sweeper flag to
		// the given thread context. Lazily creates the worker attachment
		// and a read-committed transaction on first use.
		// Returns false (with status set) on any failure.
		bool init(thread_db* tdbb)
		{
			FbStatusVector* status = tdbb->tdbb_status_vector;
			Attachment* att = NULL;

			if (m_ownAttach && !m_attStable.hasData())
				m_attStable = WorkerAttachment::getAttachment(status, getSweepTask()->m_dbb);

			if (m_attStable)
				att = m_attStable->getHandle();

			if (!att)
			{
				Arg::Gds(isc_bad_db_handle).copyTo(status);
				return false;
			}

			tdbb->setDatabase(att->att_database);
			tdbb->setAttachment(att);

			if (m_ownAttach && !m_tra)
			{
				const UCHAR sweep_tpb[] =
				{
					isc_tpb_version1, isc_tpb_read,
					isc_tpb_read_committed, isc_tpb_rec_version
				};

				try
				{
					WorkerContextHolder holder(tdbb, FB_FUNCTION);
					m_tra = TRA_start(tdbb, sizeof(sweep_tpb), sweep_tpb);
					DPM_scan_pages(tdbb);
				}
				catch(const Exception& ex)
				{
					ex.stuffException(tdbb->tdbb_status_vector);
					return false;
				}
			}

			tdbb->setTransaction(m_tra);
			tdbb->tdbb_flags |= TDBB_sweeper;

			return true;
		}

		bool m_inuse;		// item currently assigned to a worker
		bool m_ownAttach;	// item owns its (worker) attachment
		RefPtr<StableAttachmentPart> m_attStable;
		jrd_tra* m_tra;

		// part of work: relation, first and last PP's to work on
		RelInfo* m_relInfo;
		ULONG m_firstPP;
		ULONG m_lastPP;
	};

	bool handler(WorkItem& _item);

	bool getWorkItem(WorkItem** pItem);

	// Copy accumulated worker errors into the caller's status (if given)
	// and report overall success.
	bool getResult(IStatus* status)
	{
		if (status)
		{
			status->init();
			status->setErrors(m_status.getErrors());
		}

		return m_status.isSuccess();
	}

	int getMaxWorkers()
	{
		return m_items.getCount();
	}

private:
	// item is handled, get next portion of work and update RelInfo
	// also, detect if relation is handled completely
	// return true if there is some more work to do
	bool updateRelInfo(Item* item)
	{
		RelInfo* relInfo = item->m_relInfo;

		// Relation not measured yet, or all its PPs handed out:
		// this worker leaves the relation.
		if (relInfo->countPP == 0 || relInfo->nextPP >= relInfo->countPP)
		{
			relInfo->workers--;
			return false;
		}

		// Hand out the next single pointer page.
		item->m_firstPP = relInfo->nextPP;
		item->m_lastPP = item->m_firstPP;

		if (item->m_lastPP >= relInfo->countPP)
			item->m_lastPP = relInfo->countPP - 1;

		relInfo->nextPP = item->m_lastPP + 1;
		return true;
	}

	// Record the first error seen and/or request all workers to stop.
	void setError(IStatus* status, bool stopTask)
	{
		const bool copyStatus = (m_status.isSuccess() && status && status->getState() == IStatus::STATE_ERRORS);
		if (!copyStatus && (!stopTask || m_stop))
			return;

		MutexLockGuard guard(m_mutex, FB_FUNCTION);
		if (m_status.isSuccess() && copyStatus)
			m_status.save(status);
		if (stopTask)
			m_stop = true;
	}

	MemoryPool* m_pool;
	Database* m_dbb;
	TraceSweepEvent* m_trace;
	Mutex m_mutex;				// protects scheduling state below
	HalfStaticArray<Item*, 8> m_items;
	StatusHolder m_status;
	volatile bool m_stop;

	// Per-relation progress shared by all workers sweeping it.
	struct RelInfo
	{
		RelInfo()
		{
			memset(this, 0, sizeof(*this));
		}

		USHORT rel_id;
		ULONG countPP;	// number of pointer pages in relation
		ULONG nextPP;	// number of PP to assign to next worker
		ULONG workers;	// number of workers for this relation
	};

	USHORT m_nextRelID;		// next relation to work on
	USHORT m_lastRelID;		// last relation to work on
	HalfStaticArray<RelInfo, 8> m_relInfo;	// relations worked on
};
// Worker entry point: sweep the PP range assigned to this item.
// Binds the item's attachment/transaction to a fresh thread context,
// then walks records of the assigned relation between m_firstPP and
// m_lastPP, letting VIO_next_record perform the garbage collection.
// Returns false (after recording the error and stopping the task) on
// any failure, true otherwise.
bool SweepTask::handler(WorkItem& _item)
{
	Item* item = reinterpret_cast<Item*>(&_item);
	ThreadContextHolder tdbb(NULL);

	if (!item->init(tdbb))
	{
		setError(tdbb->tdbb_status_vector, true);
		return false;
	}

	WorkerContextHolder wrkHolder(tdbb, FB_FUNCTION);

	record_param rpb;
	jrd_rel* relation = NULL;

	try
	{
		RelInfo* relInfo = item->m_relInfo;

		Database* dbb = tdbb->getDatabase();
		Attachment* att = tdbb->getAttachment();

		/*relation = (*att->att_relations)[relInfo->rel_id];
		if (relation)*/
		relation = MET_lookup_relation_id(tdbb, relInfo->rel_id, false);

		// Skip relations that are gone, being dropped, temporary, or
		// have no pages yet — nothing to sweep there.
		if (relation &&
			!(relation->rel_flags & (REL_deleted | REL_deleting)) &&
			!relation->isTemporary() &&
			relation->getPages(tdbb)->rel_pages)
		{
			// Sweeping requires garbage collection to be allowed on the relation.
			jrd_rel::GCShared gcGuard(tdbb, relation);
			if (!gcGuard.gcEnabled())
			{
				string str;
				str.printf("Acquire garbage collection lock failed (%s)", relation->rel_name.c_str());
				status_exception::raise(Arg::Gds(isc_random) << Arg::Str(str));
			}

			jrd_tra* tran = tdbb->getTransaction();

			// First worker on this relation measures its PP count.
			if (relInfo->countPP == 0)
				relInfo->countPP = relation->getPages(tdbb)->rel_pages->count();

			rpb.rpb_relation = relation;
			rpb.rpb_org_scans = relation->rel_scan_count++;
			rpb.rpb_record = NULL;
			rpb.rpb_stream_flags = RPB_s_no_data | RPB_s_sweeper;
			rpb.getWindow(tdbb).win_flags = WIN_large_scan;

			// Position just before the first record of the assigned PP range...
			rpb.rpb_number.compose(dbb->dbb_max_records, dbb->dbb_dp_per_pp, 0, 0, item->m_firstPP);
			rpb.rpb_number.decrement();

			// ...and compute the last record number of the range.
			RecordNumber lastRecNo;
			lastRecNo.compose(dbb->dbb_max_records, dbb->dbb_dp_per_pp, 0, 0, item->m_lastPP + 1);
			lastRecNo.decrement();

			// DPM_next_pointer_page scope keeps the scan inside one PP.
			while (VIO_next_record(tdbb, &rpb, tran, NULL, DPM_next_pointer_page))
			{
				CCH_RELEASE(tdbb, &rpb.getWindow(tdbb));

				if (relation->rel_flags & REL_deleting)
					break;

				if (rpb.rpb_number >= lastRecNo)
					break;

				if (m_stop)
					break;

				JRD_reschedule(tdbb);

				tran->tra_oldest_active = dbb->dbb_oldest_snapshot;
			}

			delete rpb.rpb_record;
			--relation->rel_scan_count;
		}

		return !m_stop;
	}
	catch(const Exception& ex)
	{
		ex.stuffException(tdbb->tdbb_status_vector);
		delete rpb.rpb_record;
		if (relation)
		{
			// Undo the scan-count bump taken above, if it happened.
			if (relation->rel_scan_count) {
				--relation->rel_scan_count;
			}
		}
	}

	setError(tdbb->tdbb_status_vector, true);

	return false;
}
// Scheduler: hand the next portion of sweep work to a worker.
// On first call (*pItem == NULL) a free Item is claimed. A worker that
// already holds an Item first tries to continue its current relation;
// otherwise it either starts the next unhandled relation or joins a
// relation other workers are still busy with. Returns false when no
// work remains for this worker. Runs fully under the task mutex.
bool SweepTask::getWorkItem(WorkItem** pItem)
{
	MutexLockGuard guard(m_mutex, FB_FUNCTION);

	Item* item = reinterpret_cast<Item*> (*pItem);
	if (item == NULL)
	{
		// First call by this worker: claim a free item.
		for (Item** p = m_items.begin(); p < m_items.end(); p++)
			if (!(*p)->m_inuse)
			{
				(*p)->m_inuse = true;
				*pItem = item = *p;
				break;
			}
	}
	else if (updateRelInfo(item))
		return true;	// more PPs available in the current relation

	if (!item)
		return false;

	// assign part of task to item
	if (m_nextRelID >= m_lastRelID)
	{
		// find not handled relation and help to handle it
		RelInfo* relInfo = m_relInfo.begin();
		for (; relInfo < m_relInfo.end(); relInfo++)
			if (relInfo->workers > 0)
			{
				item->m_relInfo = relInfo;
				relInfo->workers++;
				if (updateRelInfo(item))
					return true;
				// updateRelInfo() undid the workers++ — try the next relation
			}

		item->m_inuse = false;
		return false;
	}

	// start to handle next relation
	USHORT relID = m_nextRelID++;
	RelInfo* relInfo = m_relInfo.begin();
	for (; relInfo < m_relInfo.end(); relInfo++)
		if (relInfo->workers == 0)
		{
			relInfo->workers++;
			relInfo->rel_id = relID;
			relInfo->countPP = 0;	// measured by the first handler() call
			item->m_relInfo = relInfo;
			// The first worker takes PP 0; countPP is still unknown here.
			item->m_firstPP = item->m_lastPP = 0;
			relInfo->nextPP = item->m_lastPP + 1;
			return true;
		}

	item->m_inuse = false;
	return false;
}
}; // namespace Jrd
static bool assert_gc_enabled(const jrd_tra* transaction, const jrd_rel* relation)
{
/**************************************
@ -3275,7 +3665,7 @@ bool VIO_next_record(thread_db* tdbb,
record_param* rpb,
jrd_tra* transaction,
MemoryPool* pool,
bool onepage)
FindNextRecordScope scope)
{
/**************************************
*
@ -3311,7 +3701,7 @@ bool VIO_next_record(thread_db* tdbb,
#endif
do {
if (!DPM_next(tdbb, rpb, lock_type, onepage))
if (!DPM_next(tdbb, rpb, lock_type, scope))
{
return false;
}
@ -3899,6 +4289,25 @@ bool VIO_sweep(thread_db* tdbb, jrd_tra* transaction, TraceSweepEvent* traceSwee
DPM_scan_pages(tdbb);
if (attachment->att_parallel_workers != 0)
{
EngineCheckout cout(tdbb, FB_FUNCTION);
Coordinator coord(dbb->dbb_permanent);
SweepTask sweep(tdbb, dbb->dbb_permanent, traceSweep);
FbLocalStatus local_status;
local_status->init();
coord.runSync(&sweep);
if (!sweep.getResult(&local_status))
local_status.raise();
return true;
}
// hvlad: restore tdbb->transaction since it can be used later
tdbb->setTransaction(transaction);
@ -3943,7 +4352,7 @@ bool VIO_sweep(thread_db* tdbb, jrd_tra* transaction, TraceSweepEvent* traceSwee
gc->sweptRelation(transaction->tra_oldest_active, relation->rel_id);
}
while (VIO_next_record(tdbb, &rpb, transaction, 0, false))
while (VIO_next_record(tdbb, &rpb, transaction, 0, DPM_next_all))
{
CCH_RELEASE(tdbb, &rpb.getWindow(tdbb));
@ -3966,7 +4375,7 @@ bool VIO_sweep(thread_db* tdbb, jrd_tra* transaction, TraceSweepEvent* traceSwee
delete rpb.rpb_record;
} // try
catch (const Firebird::Exception&)
catch (const Exception&)
{
delete rpb.rpb_record;
@ -4892,7 +5301,7 @@ void Database::garbage_collector(Database* dbb)
bool rel_exit = false;
while (VIO_next_record(tdbb, &rpb, transaction, NULL, true))
while (VIO_next_record(tdbb, &rpb, transaction, NULL, DPM_next_data_page))
{
CCH_RELEASE(tdbb, &rpb.getWindow(tdbb));

View File

@ -26,7 +26,8 @@
#ifndef JRD_VIO_PROTO_H
#define JRD_VIO_PROTO_H
namespace Jrd {
namespace Jrd
{
class jrd_rel;
class jrd_tra;
class Record;
@ -35,6 +36,13 @@ namespace Jrd {
class Savepoint;
class Format;
class TraceSweepEvent;
enum FindNextRecordScope
{
DPM_next_all, // all pages
DPM_next_data_page, // one data page only
DPM_next_pointer_page // data pages from one pointer page
};
}
void VIO_backout(Jrd::thread_db*, Jrd::record_param*, const Jrd::jrd_tra*);
@ -52,7 +60,7 @@ bool VIO_get_current(Jrd::thread_db*, Jrd::record_param*, Jrd::jrd_tra*,
void VIO_init(Jrd::thread_db*);
bool VIO_writelock(Jrd::thread_db*, Jrd::record_param*, Jrd::jrd_tra*);
bool VIO_modify(Jrd::thread_db*, Jrd::record_param*, Jrd::record_param*, Jrd::jrd_tra*);
bool VIO_next_record(Jrd::thread_db*, Jrd::record_param*, Jrd::jrd_tra*, MemoryPool*, bool);
bool VIO_next_record(Jrd::thread_db*, Jrd::record_param*, Jrd::jrd_tra*, MemoryPool*, Jrd::FindNextRecordScope);
Jrd::Record* VIO_record(Jrd::thread_db*, Jrd::record_param*, const Jrd::Format*, MemoryPool*);
bool VIO_refetch_record(Jrd::thread_db*, Jrd::record_param*, Jrd::jrd_tra*, bool, bool);
void VIO_store(Jrd::thread_db*, Jrd::record_param*, Jrd::jrd_tra*);

View File

@ -2456,6 +2456,7 @@ void DatabaseAuth::accept(PACKET* send, Auth::WriterImplementation* authBlock)
// remove tags for specific internal attaches
case isc_dpb_map_attach:
case isc_dpb_sec_attach:
case isc_dpb_worker_attach:
// remove client's config information
case isc_dpb_config:

View File

@ -423,6 +423,8 @@ const SvcSwitches backupOptions[] =
{"bkp_keyname", putStringArgument, 0, isc_spb_bkp_keyname, 0 },
{"bkp_crypt", putStringArgument, 0, isc_spb_bkp_crypt, 0 },
{"bkp_zip", putOption, 0, isc_spb_bkp_zip, 0 },
{"bkp_parallel_workers", putIntArgument, 0, isc_spb_bkp_parallel_workers, 0},
{"bkp_direct_io", putOption, 0, isc_spb_bkp_direct_io, 0},
{0, 0, 0, 0, 0}
};
@ -453,6 +455,8 @@ const SvcSwitches restoreOptions[] =
{"res_keyname", putStringArgument, 0, isc_spb_res_keyname, 0 },
{"res_crypt", putStringArgument, 0, isc_spb_res_crypt, 0 },
{"res_replica_mode", putReplicaMode, 0, isc_spb_res_replica_mode, 0},
{"res_parallel_workers", putIntArgument, 0, isc_spb_res_parallel_workers, 0},
{"res_direct_io", putOption, 0, isc_spb_res_direct_io, 0},
{0, 0, 0, 0, 0}
};
@ -498,6 +502,7 @@ const SvcSwitches repairOptions[] =
{"rpr_sweep_db", putOption, 0, isc_spb_rpr_sweep_db, 0},
{"rpr_list_limbo_trans", putOption, 0, isc_spb_rpr_list_limbo_trans, isc_info_svc_limbo_trans},
{"rpr_icu", putOption, 0, isc_spb_rpr_icu, 0},
{"rpr_par_workers", putIntArgument, 0, isc_spb_rpr_par_workers, 0},
{0, 0, 0, 0, 0}
};