8
0
mirror of https://github.com/FirebirdSQL/firebird.git synced 2025-01-22 22:43:03 +01:00

This makes concurrent bulk inserts not fight for the same data page.

This is especially critical for CS performance.
This commit is contained in:
Vlad Khorsun 2022-06-07 11:56:32 +03:00
parent 1e082b5e29
commit 7227b9af2d
5 changed files with 28 additions and 2 deletions

View File

@ -11966,7 +11966,12 @@ void WriteRelationMeta::prepareRequest(BurpGlobals* tdgbl)
}
else
add_byte(blr, blr_store);
//add_byte(blr, blr_store_bulk); // fixme
// Mark this store operation as bulk one
add_byte(blr, blr_marks);
add_byte(blr, 1);
add_byte(blr, 0x10); // must be Jrd::StatementNode::MARK_BULK_INSERT
add_byte(blr, blr_relation);
add_string(blr, m_relation->rel_name);
add_byte(blr, 0); // context variable

View File

@ -1438,6 +1438,7 @@ public:
static const unsigned MARK_MERGE = 0x02; // node is part of MERGE statement
static const unsigned MARK_FOR_UPDATE = 0x04; // implicit cursor used in UPDATE\DELETE\MERGE statement
static const unsigned MARK_AVOID_COUNTERS = 0x08; // do not touch record counters
static const unsigned MARK_BULK_INSERT = 0x10; // StoreNode is used for bulk operation
struct ExeState
{

View File

@ -8033,6 +8033,9 @@ const StmtNode* StoreNode::store(thread_db* tdbb, Request* request, WhichTrigger
record_param* rpb = &request->req_rpb[stream];
jrd_rel* relation = rpb->rpb_relation;
if ((marks & MARK_BULK_INSERT) || request->req_batch_mode)
rpb->rpb_stream_flags |= RPB_s_bulk;
const auto localTableSource = nodeAs<LocalTableSourceNode>(target);
const auto localTable = localTableSource ?
request->getStatement()->localTables[localTableSource->tableNumber] :

View File

@ -3337,9 +3337,17 @@ static rhd* locate_space(thread_db* tdbb,
ULONG pp_sequence =
(type == DPM_primary ? relPages->rel_pri_data_space : relPages->rel_sec_data_space);
const bool bulkInsert = (type == DPM_primary) && (rpb->rpb_stream_flags & RPB_s_bulk);
for (;; pp_sequence++)
{
locklevel_t ppLock = LCK_read;
// Bulk inserts looks up for empty DP only to avoid contention with
// another attachments doing bulk inserts. Note, DP number is saved in
// relPages->rel_last_free_pri_dp and next insert by same attachment
// will use same DP while concurrent bulk attachments will ignore it as
// non-empty. Take write lock on PP early to clear 'empty' flag.
locklevel_t ppLock = bulkInsert ? LCK_write : LCK_read;
if (type == DPM_primary)
relPages->rel_pri_data_space = pp_sequence;
@ -3380,6 +3388,9 @@ static rhd* locate_space(thread_db* tdbb,
bool dp_is_empty = PPG_DP_BIT_TEST(bits, slot, ppg_dp_empty);
bool dp_is_secondary = PPG_DP_BIT_TEST(bits, slot, ppg_dp_secondary);
if (bulkInsert && !dp_is_empty)
continue;
if (dp_is_empty)
{
if (ppLock == LCK_read)
@ -3459,7 +3470,12 @@ static rhd* locate_space(thread_db* tdbb,
space = find_space(tdbb, rpb, size, stack, record, type);
if (space)
{
if (DPM_primary)
relPages->rel_last_free_pri_dp = window->win_page.getPageNum();
break;
}
}
if (i == 20)

View File

@ -125,6 +125,7 @@ const USHORT RPB_s_update = 0x01; // input stream fetched for update
const USHORT RPB_s_no_data = 0x02; // nobody is going to access the data
const USHORT RPB_s_sweeper = 0x04; // garbage collector - skip swept pages
const USHORT RPB_s_unstable = 0x08; // don't use undo log, used with unstable explicit cursors
const USHORT RPB_s_bulk = 0x10; // bulk operation (currently insert only)
// Runtime flags