8
0
mirror of https://github.com/FirebirdSQL/firebird.git synced 2025-01-22 22:43:03 +01:00

Postfix for #6959 - restore failed due to zero buffer size in IBatch

This commit is contained in:
AlexPeshkoff 2021-09-13 19:39:00 +03:00
parent 4d28881a6c
commit 65a0459123
5 changed files with 91 additions and 121 deletions

View File

@@ -118,7 +118,6 @@ void fix_security_class_name(BurpGlobals* tdgbl, TEXT* sec_class, bool is_field)
bool get_acl(BurpGlobals* tdgbl, const TEXT*, ISC_QUAD*, ISC_QUAD*);
void get_array(BurpGlobals* tdgbl, burp_rel*, UCHAR*);
void get_blob(BurpGlobals* tdgbl, Firebird::IBatch* batch, const burp_fld*, UCHAR*);
void get_blob_old(BurpGlobals* tdgbl, const burp_fld*, UCHAR*);
void get_blr_blob(BurpGlobals* tdgbl, ISC_QUAD&, bool);
bool get_character_set(BurpGlobals* tdgbl);
bool get_chk_constraint(BurpGlobals* tdgbl);
@@ -2098,6 +2097,12 @@ void get_array(BurpGlobals* tdgbl, burp_rel* relation, UCHAR* record_buffer)
BURP_free (xdr_buffer.lstr_address);
}
static const unsigned GBAK_BATCH_STEP = 1000;
static const unsigned GBAK_BATCH_BLOBSTEP = 1024; // keep slightly bigger than GBAK_BATCH_STEP
static unsigned batchBufferSize = 0;
void get_blob(BurpGlobals* tdgbl, Firebird::IBatch* batch, const burp_fld* fields, UCHAR* record_buffer)
{
/**************************************
@@ -2163,16 +2168,58 @@ void get_blob(BurpGlobals* tdgbl, Firebird::IBatch* batch, const burp_fld* field
// msg 36 Can't find field for blob
}
// Create new blob
// Choose blob creation method
FbLocalStatus status_vector;
bool blobInline = false;
if (batch)
{
if (!batchBufferSize)
{
UCHAR item = Firebird::IBatch::INF_BUFFER_BYTES_SIZE;
UCHAR infoBuf[64];
batch->getInfo(&status_vector, 1, &item, sizeof infoBuf, infoBuf);
if (status_vector->hasData())
{
if (status_vector->getErrors()[1] == isc_interface_version_too_old) // FB4.0.0
batchBufferSize = 256 * 1024 * 1024;
else
BURP_error_redirect(&status_vector, 405);
}
if (!batchBufferSize)
{
Firebird::ClumpletReader rdr(Firebird::ClumpletReader::InfoResponse, infoBuf, sizeof infoBuf);
rdr.rewind();
if (rdr.isEof() || rdr.getClumpTag() != item)
BURP_error(405, true);
else
batchBufferSize = rdr.getInt();
}
fb_assert(batchBufferSize);
}
if (FB_UINT64(segments) * max_segment < batchBufferSize / GBAK_BATCH_BLOBSTEP)
blobInline = true;
}
// Create new blob
ISC_QUAD* blob_id = (ISC_QUAD*) ((UCHAR*) record_buffer + field->fld_offset);
const UCHAR blob_desc[] = {isc_bpb_version1, isc_bpb_type, 1, blob_type};
BlobBuffer local_buffer;
UCHAR* const buffer = local_buffer.getBuffer(max_segment);
FbLocalStatus status_vector;
bool first = true;
BlobWrapper blob(&status_vector);
if (!blobInline)
{
if (!blob.create(DB, gds_trans, *blob_id, sizeof(blob_desc), blob_desc))
{
BURP_error_redirect(&status_vector, 37);
// msg 37 isc_create_blob failed
}
}
// Eat up blob segments
@@ -2183,125 +2230,46 @@ void get_blob(BurpGlobals* tdgbl, Firebird::IBatch* batch, const burp_fld* field
if (length)
get_block(tdgbl, buffer, length);
if (first)
batch->addBlob(&status_vector, length, buffer, blob_id, sizeof(blob_desc), blob_desc);
else
batch->appendBlobData(&status_vector, length, buffer);
if (status_vector->hasData())
if (blobInline)
{
BURP_error_redirect(&status_vector, 370);
// msg 370 could not append BLOB data to batch
if (first)
batch->addBlob(&status_vector, length, buffer, blob_id, sizeof(blob_desc), blob_desc);
else
batch->appendBlobData(&status_vector, length, buffer);
if (status_vector->hasData())
{
BURP_error_redirect(&status_vector, 370);
// msg 370 could not append BLOB data to batch
}
}
first = false;
}
}
void get_blob_old(BurpGlobals* tdgbl, const burp_fld* fields, UCHAR* record_buffer)
{
/**************************************
*
* g e t _ b l o b
*
**************************************
*
* Functional description
* Read blob attributes and copy data from input file to nice,
* shiny, new blob.
*
**************************************/
// Pick up attributes
ULONG segments = 0;
USHORT field_number = MAX_USHORT;
USHORT max_segment = 0;
UCHAR blob_type = 0;
att_type attribute;
scan_attr_t scan_next_attr;
skip_init(&scan_next_attr);
while (skip_scan(&scan_next_attr), get_attribute(&attribute, tdgbl) != att_blob_data)
{
switch (attribute)
{
case att_blob_field_number:
field_number = (USHORT) get_int32(tdgbl);
break;
case att_blob_max_segment:
max_segment = (USHORT) get_int32(tdgbl);
break;
case att_blob_number_segments:
segments = get_int32(tdgbl);
break;
case att_blob_type:
blob_type = (UCHAR) get_int32(tdgbl);
break;
default:
bad_attribute(scan_next_attr, attribute, 64);
// msg 64 blob
break;
}
}
// Find the field associated with the blob
const burp_fld* field;
for (field = fields; field; field = field->fld_next)
{
if (field->fld_number == field_number)
break;
}
if (!field)
{
BURP_error_redirect(NULL, 36);
// msg 36 Can't find field for blob
}
// Create new blob
ISC_QUAD* blob_id = (ISC_QUAD*) ((UCHAR*) record_buffer + field->fld_offset);
FbLocalStatus status_vector;
BlobWrapper blob(&status_vector);
const UCHAR blob_desc[] = {isc_bpb_version1, isc_bpb_type, 1, blob_type};
if (!blob.create(DB, gds_trans, *blob_id, sizeof(blob_desc), blob_desc))
{
BURP_error_redirect(&status_vector, 37);
// msg 37 isc_create_blob failed
}
// Allocate blob buffer if static buffer is too short
BlobBuffer static_buffer;
UCHAR* const buffer = static_buffer.getBuffer(max_segment);
// Eat up blob segments
for (; segments > 0; --segments )
{
USHORT length = get(tdgbl);
length |= get(tdgbl) << 8;
if (length)
{
get_block(tdgbl, buffer, length);
}
if (!blob.putSegment(length, buffer))
else if (!blob.putSegment(length, buffer))
{
BURP_error_redirect(&status_vector, 38);
// msg 38 isc_put_segment failed
}
first = false;
}
if (!blob.close())
BURP_error_redirect(&status_vector, 23);
// msg 23 isc_close_blob failed
if (!blobInline)
{
if (!blob.close())
BURP_error_redirect(&status_vector, 23);
// msg 23 isc_close_blob failed
if (batch)
{
ISC_QUAD real_id = *blob_id;
batch->registerBlob(&status_vector, &real_id, blob_id);
if (status_vector->hasData())
{
BURP_error_redirect(&status_vector, 370);
// msg 370 could not append BLOB data to batch
}
}
}
}
@@ -3177,7 +3145,6 @@ rec_type get_data(BurpGlobals* tdgbl, burp_rel* relation, bool skip_relation)
// Create batch
const int GBAK_BATCH_STEP = 1000;
Firebird::AutoDispose<Firebird::IXpbBuilder> pb(Firebird::UtilInterfacePtr()->
getXpbBuilder(&tdgbl->throwStatus, Firebird::IXpbBuilder::BATCH, NULL, 0));
pb->insertInt(&tdgbl->throwStatus, Firebird::IBatch::TAG_MULTIERROR, 1);
@@ -3314,7 +3281,7 @@ rec_type get_data(BurpGlobals* tdgbl, burp_rel* relation, bool skip_relation)
}
batch->add(&tdgbl->throwStatus, 1, sql);
if ((records % 1000 != 0) && (record == rec_data))
if ((records % GBAK_BATCH_STEP != 0) && (record == rec_data))
continue;
Firebird::AutoDispose<Firebird::IBatchCompletionState> cs(batch->execute(&tdgbl->throwStatus, gds_trans));
@@ -3831,7 +3798,7 @@ rec_type get_data_old(BurpGlobals* tdgbl, burp_rel* relation)
while (record == rec_blob || record == rec_array)
{
if (record == rec_blob)
get_blob_old(tdgbl, relation->rel_fields, (UCHAR *) buffer);
get_blob(tdgbl, nullptr, relation->rel_fields, (UCHAR *) buffer);
else if (record == rec_array)
get_array(tdgbl, relation, (UCHAR *) buffer);
get_record(&record, tdgbl);

View File

@@ -133,7 +133,7 @@ public:
if (rc == 1 && kind != UnTagged && kind != SpbStart &&
kind != WideUnTagged && kind != SpbSendItems &&
kind != SpbReceiveItems && kind != SpbResponse &&
kind != InfoResponse)
kind != InfoResponse && kind != InfoItems)
{
rc = 0;
}

View File

@@ -124,6 +124,8 @@ DsqlBatch::DsqlBatch(dsql_req* req, const dsql_msg* /*message*/, IMessageMetadat
m_bufferSize = pb.getInt();
if (m_bufferSize > HARD_BUFFER_LIMIT)
m_bufferSize = HARD_BUFFER_LIMIT;
if (!m_bufferSize)
m_bufferSize = HARD_BUFFER_LIMIT;
break;
}
}
@@ -817,7 +819,7 @@ void DsqlBatch::DataCache::put3(const void* data, ULONG dataSize, ULONG offset)
void DsqlBatch::DataCache::put(const void* d, ULONG dataSize)
{
if (m_limit && (m_used + m_cache.getCount() + dataSize > m_limit))
if (m_used + m_cache.getCount() + dataSize > m_limit)
ERR_post(Arg::Gds(isc_batch_too_big));
const UCHAR* data = reinterpret_cast<const UCHAR*>(d);

View File

@@ -9,7 +9,7 @@ set bulk_insert INSERT INTO FACILITIES (LAST_CHANGE, FACILITY, FAC_CODE, MAX_NUM
('2018-06-22 11:46:00', 'DYN', 8, 309)
('1996-11-07 13:39:40', 'INSTALL', 10, 1)
('1996-11-07 13:38:41', 'TEST', 11, 4)
('2021-02-04 11:21:00', 'GBAK', 12, 405)
('2021-09-13 15:40:00', 'GBAK', 12, 406)
('2019-04-13 21:10:00', 'SQLERR', 13, 1047)
('1996-11-07 13:38:42', 'SQLWARN', 14, 613)
('2018-02-27 14:50:31', 'JRD_BUGCHK', 15, 307)

View File

@@ -1972,6 +1972,7 @@ ERROR: Backup incomplete', NULL, NULL);
(NULL, 'get_pub_table', 'restore.epp', NULL, 12, 402, NULL, 'publication for table', NULL, NULL);
('gbak_opt_replica', 'burp_usage', 'burp.c', NULL, 12, 403, NULL, ' @1REPLICA <mode> "none", "read_only" or "read_write" replica mode', NULL, NULL);
('gbak_replica_req', 'BURP_gbak', 'burp.c', NULL, 12, 404, NULL, '"none", "read_only" or "read_write" required', NULL, NULL);
(NULL, 'get_blob', 'restore.epp', NULL, 12, 405, NULL, 'could not access batch parameters', NULL, NULL);
-- SQLERR
(NULL, NULL, NULL, NULL, 13, 1, NULL, 'Firebird error', NULL, NULL);
(NULL, NULL, NULL, NULL, 13, 74, NULL, 'Rollback not performed', NULL, NULL);