
Fixed #7745: Error restoring database which has system domains in user table with BLOBs using embedded connection

AlexPeshkoff 2023-09-21 19:19:23 +03:00
parent e11eb93cc2
commit 1d02248f81
2 changed files with 33 additions and 11 deletions


@@ -265,7 +265,7 @@ void DsqlBatch::add(thread_db* tdbb, ULONG count, const void* inBuffer)
return;
m_messages.align(m_alignment);
m_messages.put(inBuffer, (count - 1) * m_alignedMessage + m_messageSize);
DEB_BATCH(fprintf(stderr, "Put to batch %d messages\n", count));
//DEB_BATCH(fprintf(stderr, "Put to batch %d messages\n", count));
}
void DsqlBatch::blobCheckMeta()
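Most of the hunks below simply add DEB_BATCH(...) trace calls along the batch and blob code paths. A minimal sketch of that conditional-trace pattern, assuming a hypothetical DEBUG_BATCH_TRACE switch rather than the macro actually defined in the Firebird sources:

#include <cstdio>

// When tracing is enabled, DEB_BATCH(x) expands to the statement x;
// otherwise it compiles away, so the fprintf calls cost nothing in normal builds.
#ifdef DEBUG_BATCH_TRACE
#define DEB_BATCH(expr) expr
#else
#define DEB_BATCH(expr)
#endif

int main()
{
	unsigned count = 3;
	DEB_BATCH(fprintf(stderr, "Put to batch %u messages\n", count));
	(void) count;	// silence the unused-variable warning when tracing is compiled out
	return 0;
}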
@@ -308,6 +308,7 @@ void DsqlBatch::blobSetSize()
m_blobs.put3(&blobSize, sizeof(blobSize), m_lastBlob + sizeof(ISC_QUAD));
m_setBlobSize = false;
}
DEB_BATCH(fprintf(stderr, "blobSetSize %u\n", blobSize));
}
void DsqlBatch::blobPrepare()
@@ -357,6 +358,8 @@ void DsqlBatch::addBlob(thread_db* tdbb, ULONG length, const void* inBuffer, ISC
ULONG fullLength = length + parLength;
m_blobs.put(&fullLength, sizeof(ULONG));
m_blobs.put(&parLength, sizeof(ULONG));
DEB_BATCH(fprintf(stderr, "addBlob %08x.%08x par %u full %u ",
blobId->gds_quad_high, blobId->gds_quad_low, parLength, fullLength));
// Store BPB
if (parLength)
@@ -393,8 +396,11 @@ void DsqlBatch::putSegment(ULONG length, const void* inBuffer)
m_blobs.align(IBatch::BLOB_SEGHDR_ALIGN);
m_blobs.put(&l, sizeof(l));
m_setBlobSize = true;
DEB_BATCH(fprintf(stderr, "segment header, "));
}
m_blobs.put(inBuffer, length);
DEB_BATCH(fprintf(stderr, "segment data %u ", length));
}
void DsqlBatch::addBlobStream(thread_db* tdbb, unsigned length, const void* inBuffer)
@@ -539,6 +545,10 @@ private:
ISC_QUAD batchBlobId = *reinterpret_cast<ISC_QUAD*>(flow.data);
ULONG* blobSize = reinterpret_cast<ULONG*>(flow.data + sizeof(ISC_QUAD));
ULONG* bpbSize = reinterpret_cast<ULONG*>(flow.data + sizeof(ISC_QUAD) + sizeof(ULONG));
DEB_BATCH(fprintf(stderr, "B-ID: %08x.%08x full=%u par=%u\n", batchBlobId.gds_quad_high, batchBlobId.gds_quad_low,
*blobSize, *bpbSize));
flow.newHdr(*blobSize);
ULONG currentBpbSize = *bpbSize;
@@ -589,7 +599,6 @@ private:
blob = blb::create2(tdbb, transaction, &engineBlobId, bpb->getCount(),
bpb->begin(), true);
//DEB_BATCH(fprintf(stderr, "B-ID: (%x,%x)\n", batchBlobId.gds_quad_high, batchBlobId.gds_quad_low));
registerBlob(reinterpret_cast<ISC_QUAD*>(&engineBlobId), &batchBlobId);
}
}
@@ -608,6 +617,8 @@ private:
flow.move(sizeof(USHORT));
dataSize = *segSize;
DEB_BATCH(fprintf(stderr, " Seg: %u\n", dataSize));
if (dataSize > flow.currentBlobSize)
{
ERRD_post(Arg::Gds(isc_sqlerr) << Arg::Num(-104) <<
@@ -798,8 +809,6 @@ void DsqlBatch::DataCache::setBuf(ULONG size, ULONG cacheCapacity)
void DsqlBatch::DataCache::put3(const void* data, ULONG dataSize, ULONG offset)
{
// This assertion guarantees that data always fits as a whole into m_cache or m_space,
// never placed half in one storage, half - in another.
fb_assert((DsqlBatch::RAM_BATCH % dataSize == 0) && (offset % dataSize == 0));
if (offset >= m_used)
@@ -812,6 +821,13 @@ void DsqlBatch::DataCache::put3(const void* data, ULONG dataSize, ULONG offset)
}
else
{
if (offset + dataSize > m_used)
{
// what a pity - data appears partially divided between cache & tempspace
fb_assert(offset + dataSize <= getSize());
flush();
}
const FB_UINT64 writtenBytes = m_space->write(offset, data, dataSize);
fb_assert(writtenBytes == dataSize);
}
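For context on the hunk above: DataCache keeps batch data in a small RAM cache (m_cache) and spills it to a TempSpace once the cache fills, with m_used counting the bytes already spilled. put3() patches bytes that were stored earlier at an absolute offset; the removed comment shows the old code assumed such a patch could never straddle the spilled/cached boundary, and the new code flushes the cache first whenever it would. A minimal, self-contained sketch of the idea, with invented names (TwoTierBuffer, a std::vector standing in for TempSpace), not the Firebird implementation:

#include <cassert>
#include <cstring>
#include <vector>

class TwoTierBuffer
{
public:
	explicit TwoTierBuffer(size_t cacheCapacity) : m_cacheCapacity(cacheCapacity) {}

	// Total bytes stored so far: spilled bytes plus bytes still sitting in the RAM cache.
	size_t getSize() const { return m_used + m_cache.size(); }

	// Append new data, spilling the RAM cache when it would overflow.
	void put(const void* data, size_t size)
	{
		const unsigned char* p = static_cast<const unsigned char*>(data);

		if (m_cache.size() + size > m_cacheCapacity)
		{
			flush();						// make room: move cached bytes to the spill store

			if (size > m_cacheCapacity)		// huge buffer: bypass the cache entirely
			{
				m_spill.insert(m_spill.end(), p, p + size);
				m_used += size;
				return;
			}
		}

		m_cache.insert(m_cache.end(), p, p + size);
	}

	// Patch bytes that were stored earlier, at an absolute offset (what DataCache::put3 does).
	void put3(const void* data, size_t size, size_t offset)
	{
		assert(offset + size <= getSize());

		if (offset >= m_used)
		{
			// The patch lies entirely inside the RAM cache.
			std::memcpy(m_cache.data() + (offset - m_used), data, size);
		}
		else
		{
			if (offset + size > m_used)
			{
				// The patch would straddle the spill/cache boundary:
				// flush first so the whole target range lives in the spill store.
				flush();
			}
			std::memcpy(m_spill.data() + offset, data, size);
		}
	}

	// Move everything currently in the RAM cache to the spill store.
	void flush()
	{
		m_spill.insert(m_spill.end(), m_cache.begin(), m_cache.end());
		m_used += m_cache.size();
		m_cache.clear();
	}

private:
	size_t m_cacheCapacity;
	size_t m_used = 0;						// bytes already moved to the spill store
	std::vector<unsigned char> m_cache;		// small RAM cache
	std::vector<unsigned char> m_spill;		// stand-in for TempSpace
};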
@@ -844,13 +860,7 @@ void DsqlBatch::DataCache::put(const void* d, ULONG dataSize)
}
// swap ram cache to tempspace
if (!m_space)
m_space = FB_NEW_POOL(getPool()) TempSpace(getPool(), TEMP_NAME);
const FB_UINT64 writtenBytes = m_space->write(m_used, m_cache.begin(), m_cache.getCount());
fb_assert(writtenBytes == m_cache.getCount());
m_used += m_cache.getCount();
m_cache.clear();
flush();
// in a case of huge buffer write directly to tempspace
if (dataSize > m_cacheCapacity / K)
@@ -865,6 +875,17 @@ void DsqlBatch::DataCache::put(const void* d, ULONG dataSize)
m_cache.append(data, dataSize);
}
void DsqlBatch::DataCache::flush()
{
if (!m_space)
m_space = FB_NEW_POOL(getPool()) TempSpace(getPool(), TEMP_NAME);
const FB_UINT64 writtenBytes = m_space->write(m_used, m_cache.begin(), m_cache.getCount());
fb_assert(writtenBytes == m_cache.getCount());
m_used += m_cache.getCount();
m_cache.clear();
}
void DsqlBatch::DataCache::align(ULONG alignment)
{
ULONG a = getSize() % alignment;
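The two hunks above extract the cache-to-TempSpace spill code from put() into the new flush() helper so that put3() can reuse it. A small usage example for the TwoTierBuffer sketch shown earlier, constructing exactly the kind of straddling overwrite the commit guards against:

#include <cstdio>
// assumes the TwoTierBuffer sketch above is in the same translation unit

int main()
{
	TwoTierBuffer buf(8);					// tiny cache so it spills quickly

	unsigned char block[6] = {0, 1, 2, 3, 4, 5};
	buf.put(block, sizeof(block));			// fits: stays in the RAM cache
	buf.put(block, sizeof(block));			// overflows: the first block is spilled

	// Patch 4 bytes at offset 4: bytes 4..5 are already spilled, bytes 6..7 are still
	// cached, so put3 flushes first; without that the write would run past the spill store.
	unsigned char patch[4] = {9, 9, 9, 9};
	buf.put3(patch, sizeof(patch), 4);

	printf("total bytes stored: %zu\n", buf.getSize());
	return 0;
}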


@@ -125,6 +125,7 @@ private:
ULONG getSize() const;
ULONG getCapacity() const;
void clear();
void flush();
private:
typedef Firebird::Array<UCHAR> Cache;