Mirror of https://github.com/FirebirdSQL/firebird.git

Bring some consistency into the memory management.

Reduce the number of casts for memory buffers.
Use UCHAR instead of the confusing BLOB_PTR.
dimitr 2010-03-19 10:54:53 +00:00
parent d7de6b920d
commit 168032571e
4 changed files with 79 additions and 88 deletions
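The pattern behind most of the hunks below: untyped pool allocations that forced a cast at every use become typed UCHAR arrays, and scalar deletes are matched to their array allocations with delete[]. A stand-alone sketch of the before/after idiom — plain new[] stands in for Firebird's FB_NEW(pool) macro, and run_control_sketch is an illustrative stand-in for run_control, not the real struct:

#include <cstddef>
#include <cstring>

typedef unsigned char UCHAR;

struct run_control_sketch      // illustrative stand-in for run_control
{
	UCHAR* run_buffer;         // was SORTP*: byte arithmetic required casts
	UCHAR* run_end_buffer;
};

int main()
{
	const std::size_t size = 1024;

	// Before: untyped allocation, a cast at every touch point, and a
	// scalar delete for what was really an array:
	//     SORTP* mem = (SORTP*) pool.allocate(size);
	//     ...
	//     delete (UCHAR*) mem;   // mismatched with the allocation
	//
	// After: typed array new, no casts for byte math, matching delete[].
	UCHAR* mem = new UCHAR[size];

	run_control_sketch run = { mem, mem + size };
	std::memset(run.run_buffer, 0, run.run_end_buffer - run.run_buffer);

	delete[] mem;   // matches new UCHAR[size]
	return 0;
}

With UCHAR* buffers, the byte arithmetic done throughout sort.cpp (run_end_buffer - run_buffer, mem += mem_size) needs no BLOB_PTR or SORTP casts — which is exactly what the diffs below remove.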

TempSpace.cpp

@@ -65,7 +65,7 @@ TempSpace::Block::Block(Block* tail, size_t length)
 TempSpace::MemoryBlock::MemoryBlock(MemoryPool& pool, Block* tail, size_t length)
 	: Block(tail, length)
 {
-	ptr = FB_NEW(pool) char[length];
+	ptr = FB_NEW(pool) UCHAR[length];
 }

 TempSpace::MemoryBlock::~MemoryBlock()

@@ -212,7 +212,7 @@ size_t TempSpace::read(offset_t offset, void* buffer, size_t length)
 	// search for the first needed block
 	Block* block = findBlock(offset);

-	char* p = static_cast<char*>(buffer);
+	UCHAR* p = static_cast<UCHAR*>(buffer);
 	size_t l = length;

 	// read data from the block chain

@@ -250,7 +250,7 @@ size_t TempSpace::write(offset_t offset, const void* buffer, size_t length)
 	// search for the first needed block
 	Block* const block = findBlock(offset);

-	const char* p = static_cast<const char*>(buffer);
+	const UCHAR* p = static_cast<const UCHAR*>(buffer);
 	size_t l = length;

 	// write data to as many blocks as necessary

@@ -522,7 +522,7 @@ void TempSpace::releaseSpace(offset_t position, size_t size)
 // Return contiguous chunk of memory if present at given location
 //

-char* TempSpace::inMemory(offset_t begin, size_t size) const
+UCHAR* TempSpace::inMemory(offset_t begin, size_t size) const
 {
 	const Block* block = findBlock(begin);
 	return block ? block->inMemory(begin, size) : NULL;

@@ -535,7 +535,7 @@ char* TempSpace::inMemory(offset_t begin, size_t size) const
 // of search range if found
 //

-char* TempSpace::findMemory(offset_t& begin, offset_t end, size_t size) const
+UCHAR* TempSpace::findMemory(offset_t& begin, offset_t end, size_t size) const
 {
 	offset_t local_offset = begin;
 	const offset_t save_begin = begin;

@@ -543,7 +543,7 @@ char* TempSpace::findMemory(offset_t& begin, offset_t end, size_t size) const
 	while (block && (begin + size <= end))
 	{
-		char* mem = block->inMemory(local_offset, size);
+		UCHAR* mem = block->inMemory(local_offset, size);

 		if (mem)
 		{
 			return mem;

@@ -611,14 +611,14 @@ size_t TempSpace::allocateBatch(size_t count, size_t minSize, size_t maxSize, Se
 	offset_t freeEnd = freeSpace ? freeSpace->position + freeSpace->size : 0;

 	while (segments.getCount() < count && freeSpace)
 	{
-		char* mem = findMemory(freeSeek, freeEnd, freeMem);
+		UCHAR* mem = findMemory(freeSeek, freeEnd, freeMem);

 		if (mem)
 		{
 			fb_assert(freeSeek + freeMem <= freeEnd);

 #ifdef DEV_BUILD
 			offset_t seek1 = freeSeek;
-			char* p = findMemory(seek1, freeEnd, freeMem);
+			UCHAR* p = findMemory(seek1, freeEnd, freeMem);
 			fb_assert(p == mem);
 			fb_assert(seek1 == freeSeek);
 #endif

TempSpace.h

@@ -52,11 +52,11 @@ public:
 	offset_t allocateSpace(size_t size);
 	void releaseSpace(offset_t offset, size_t size);

-	char* inMemory(offset_t offset, size_t size) const;
+	UCHAR* inMemory(offset_t offset, size_t size) const;

 	struct SegmentInMemory
 	{
-		char* memory;
+		UCHAR* memory;
 		offset_t position;
 		size_t size;
 	};

@@ -78,7 +78,7 @@ private:
 		virtual size_t read(offset_t offset, void* buffer, size_t length) = 0;
 		virtual size_t write(offset_t offset, const void* buffer, size_t length) = 0;
-		virtual char* inMemory(offset_t offset, size_t size) const = 0;
+		virtual UCHAR* inMemory(offset_t offset, size_t size) const = 0;
 		virtual bool sameFile(const Firebird::TempFile* file) const = 0;

 		Block *prev;

@@ -95,7 +95,7 @@ private:
 		size_t read(offset_t offset, void* buffer, size_t length);
 		size_t write(offset_t offset, const void* buffer, size_t length);

-		char* inMemory(offset_t offset, size_t _size) const
+		UCHAR* inMemory(offset_t offset, size_t _size) const
 		{
 			if ((offset < this->size) && (offset + _size <= this->size))
 				return ptr + offset;

@@ -109,7 +109,7 @@ private:
 		}

 	private:
-		char* ptr;
+		UCHAR* ptr;
 	};

 	class FileBlock : public Block

@@ -121,7 +121,7 @@ private:
 		size_t read(offset_t offset, void* buffer, size_t length);
 		size_t write(offset_t offset, const void* buffer, size_t length);

-		char* inMemory(offset_t /*offset*/, size_t /*a_size*/) const
+		UCHAR* inMemory(offset_t /*offset*/, size_t /*a_size*/) const
 		{
 			return NULL;
 		}

@@ -144,7 +144,7 @@ private:
 			return false;
 		}

-	char* findMemory(offset_t& begin, offset_t end, size_t size) const;
+	UCHAR* findMemory(offset_t& begin, offset_t end, size_t size) const;

 	// free/used segments management
 	class Segment

sort.cpp

@@ -224,7 +224,7 @@ Sort::Sort(Database* dbb,
 #ifdef DEBUG_MERGE
 	// To debug the merge algorithm, force the in-memory pool to be VERY small
 	m_size_memory = 2000;
-	m_memory = (SORTP*) pool.allocate(m_size_memory);
+	m_memory = FB_NEW(pool) UCHAR[m_size_memory];
 #else
 	// Try to get a big chunk of memory, if we can't try smaller and
 	// smaller chunks until we can get the memory. If we get down to

@@ -236,7 +236,7 @@ Sort::Sort(Database* dbb,
 	{
 		try
 		{
-			m_memory = (SORTP*) pool.allocate(m_size_memory);
+			m_memory = FB_NEW(pool) UCHAR[m_size_memory];
 			break;
 		}
 		catch (const BadAlloc&)

@@ -249,7 +249,7 @@ Sort::Sort(Database* dbb,
 	}
 #endif // DEBUG_MERGE

-	m_end_memory = (SORTP*) ((BLOB_PTR*) m_memory + m_size_memory);
+	m_end_memory = m_memory + m_size_memory;
 	m_first_pointer = (sort_record**) m_memory;

 	// Set up the temp space
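The constructor loop above ("try smaller and smaller chunks until we can get the memory") is a shrink-on-failure allocation. A minimal stand-alone sketch of the same idea — plain new[] instead of FB_NEW(pool), and MAX_BUF, MIN_BUF, and allocateSortBuffer are illustrative names, not Firebird's:

#include <new>

typedef unsigned char UCHAR;

static const unsigned MAX_BUF = 1024 * 1024;  // start by asking for a big chunk
static const unsigned MIN_BUF = 16 * 1024;    // give up below this floor

static UCHAR* allocateSortBuffer(unsigned& size)
{
	size = MAX_BUF;
	for (;;)
	{
		try
		{
			return new UCHAR[size];  // stands in for FB_NEW(pool) UCHAR[size]
		}
		catch (const std::bad_alloc&)
		{
			if (size <= MIN_BUF)
				throw;      // cannot shrink further, propagate the failure
			size /= 2;      // retry with a smaller chunk
		}
	}
}

int main()
{
	unsigned size = 0;
	UCHAR* memory = allocateSortBuffer(size);
	UCHAR* end_memory = memory + size;  // typed pointer: byte math needs no casts
	(void) end_memory;
	delete[] memory;                    // delete[] matches new[]
	return 0;
}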
@@ -291,7 +291,7 @@ Sort::~Sort()
 	// If runs are allocated and not in the big block, release them.
 	// Then release the big block.

-	delete m_memory;
+	delete[] m_memory;

 	// Clean up the runs that were used

@@ -300,7 +300,7 @@ Sort::~Sort()
 	{
 		m_runs = run->run_next;
 		if (run->run_buff_alloc)
-			delete (UCHAR*) run->run_buffer;
+			delete[] run->run_buffer;
 		delete run;
 	}

@@ -310,11 +310,11 @@ Sort::~Sort()
 	{
 		m_free_runs = run->run_next;
 		if (run->run_buff_alloc)
-			delete (UCHAR*) run->run_buffer;
+			delete[] run->run_buffer;
 		delete run;
 	}

-	delete m_merge_pool;
+	delete[] m_merge_pool;
 }

@@ -403,8 +403,8 @@ void Sort::put(thread_db* tdbb, ULONG** record_address)
 	// Check that we are not at the beginning of the buffer in addition
 	// to checking for space for the record. This avoids the pointer
 	// record from underflowing in the second condition.
-	if ((BLOB_PTR*) record < (BLOB_PTR*) (m_memory + m_longs) ||
-		(BLOB_PTR*) NEXT_RECORD(record) <= (BLOB_PTR*) (m_next_pointer + 1))
+	if ((UCHAR*) record < m_memory + m_longs ||
+		(UCHAR*) NEXT_RECORD(record) <= (UCHAR*) (m_next_pointer + 1))
 	{
 		putRun();
 		while (true)

@@ -514,14 +514,13 @@ void Sort::sort(thread_db* tdbb)
 	{
 		if (run->run_buff_alloc)
 		{
-			delete (UCHAR*) run->run_buffer;
+			delete[] run->run_buffer;
 			run->run_buff_alloc = false;
 		}
 		++run_count;
 	}

-	run_merge_hdr** streams =
-		(run_merge_hdr**) m_owner->getPool().allocate(run_count * sizeof(run_merge_hdr*));
+	AutoPtr<run_merge_hdr*> streams(FB_NEW(m_owner->getPool()) run_merge_hdr*[run_count]);

 	run_merge_hdr** m1 = streams;
 	for (run = m_runs; run; run = run->run_next)

@@ -534,24 +533,15 @@ void Sort::sort(thread_db* tdbb)
 	if (count > 1)
 	{
 		fb_assert(!m_merge_pool);	// shouldn't have a pool
-		try
-		{
-			m_merge_pool =
-				(merge_control*) m_owner->getPool().allocate((count - 1) * sizeof(merge_control));
-			merge_pool = m_merge_pool;
-			memset(merge_pool, 0, (count - 1) * sizeof(merge_control));
-		}
-		catch (const BadAlloc&)
-		{
-			delete streams;
-			throw;
-		}
+		m_merge_pool = FB_NEW(m_owner->getPool()) merge_control[count - 1];
+		merge_pool = m_merge_pool;
+		memset(merge_pool, 0, (count - 1) * sizeof(merge_control));
 	}
 	else
 	{
 		// Merge of 1 or 0 runs doesn't make sense
 		fb_assert(false);	// We really shouldn't get here
-		merge = (merge_control*) * streams;	// But if we do...
+		merge = (merge_control*) *streams;	// But if we do...
 	}

@@ -596,7 +586,7 @@ void Sort::sort(thread_db* tdbb)
 		count = m2 - streams;
 	}

-	delete streams;
+	streams.reset();

 	merge->mrg_header.rmh_parent = NULL;
 	m_merge = merge;
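The sort() hunks above are an RAII cleanup as much as a type cleanup: once the streams array is owned by Firebird's AutoPtr, the old try/catch that deleted it by hand before rethrowing becomes unnecessary, because the owner's destructor runs during stack unwinding. A simplified stand-alone guard showing the idea — ArrayGuard and buildMergeTree are illustrative; Firebird's pool-aware AutoPtr differs:

#include <new>

// Minimal scope guard for heap arrays; sketch only.
template <typename T>
class ArrayGuard
{
public:
	explicit ArrayGuard(T* p) : ptr(p) {}
	~ArrayGuard() { delete[] ptr; }            // runs on normal exit and unwinding
	operator T*() const { return ptr; }        // usable like a raw pointer
	void reset() { delete[] ptr; ptr = 0; }    // mirrors streams.reset() above
private:
	ArrayGuard(const ArrayGuard&);             // non-copyable
	ArrayGuard& operator=(const ArrayGuard&);
	T* ptr;
};

static void buildMergeTree(unsigned runCount)
{
	ArrayGuard<void*> streams(new void*[runCount]);

	// Before: this allocation sat inside try/catch so streams could be
	// deleted by hand before rethrowing. Now, if it throws std::bad_alloc,
	// ~ArrayGuard() frees the array automatically during unwinding.
	char* mergePool = new char[64];
	delete[] mergePool;

	streams.reset();   // done with the scratch array before the function ends
}

int main()
{
	buildMergeTree(8);
	return 0;
}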
@@ -619,25 +609,25 @@ void Sort::sort(thread_db* tdbb)
 		if (!run->run_buffer)
 		{
 			int mem_size = MIN(allocSize / rec_size, run->run_records) * rec_size;
-			char* mem = NULL;
+			UCHAR* mem = NULL;
 			try
 			{
-				mem = (char*) m_owner->getPool().allocate(mem_size);
+				mem = FB_NEW(m_owner->getPool()) UCHAR[mem_size];
 			}
 			catch (const BadAlloc&)
 			{
 				mem_size = (mem_size / (2 * rec_size)) * rec_size;
 				if (!mem_size)
 					throw;
-				mem = (char*) m_owner->getPool().allocate(mem_size);
+				mem = FB_NEW(m_owner->getPool()) UCHAR[mem_size];
 			}
 			run->run_buff_alloc = true;
 			run->run_buff_cache = false;
-			run->run_buffer = reinterpret_cast<SORTP*>(mem);
+			run->run_buffer = mem;
 			mem += mem_size;
 			run->run_record = reinterpret_cast<sort_record*>(mem);
-			run->run_end_buffer = reinterpret_cast<SORTP*> (mem);
+			run->run_end_buffer = mem;
 		}
 	}
 }

@@ -803,7 +793,7 @@ void Sort::diddleKey(UCHAR* record, bool direction)
 	for (sort_key_def* key = m_description.begin(), *end = m_description.end(); key < end; key++)
 	{
-		BLOB_PTR* p = (BLOB_PTR*) record + key->skd_offset;
+		UCHAR* p = (UCHAR*) record + key->skd_offset;
 		USHORT* wp = (USHORT*) p;
 		SORTP* lwp = (SORTP*) p;
 		USHORT complement = key->skd_flags & SKD_descending;

@@ -875,7 +865,7 @@ void Sort::diddleKey(UCHAR* record, bool direction)
 				*p = c1;
 				p += 3;
 			}
-			p = (BLOB_PTR*) wp;
+			p = (UCHAR*) wp;
 			break;

 		case SKD_short:

@@ -983,13 +973,13 @@ void Sort::diddleKey(UCHAR* record, bool direction)
 		if (key->skd_dtype == SKD_varying && !direction)
 		{
-			p = (BLOB_PTR*) record + key->skd_offset;
+			p = (UCHAR*) record + key->skd_offset;
 			((vary*) p)->vary_length = *((USHORT*) (record + key->skd_vary_offset));
 		}

 		if (key->skd_dtype == SKD_cstring && !direction)
 		{
-			p = (BLOB_PTR*) record + key->skd_offset;
+			p = (UCHAR*) record + key->skd_offset;
 			USHORT l = *((USHORT*) (record + key->skd_vary_offset));
 			*(p + l) = 0;
 		}

@@ -1035,8 +1025,7 @@ sort_record* Sort::getMerge(merge_control* merge)
 			// Find the appropriate record in the buffer to return

-			if ((record = (sort_record*) run->run_record) <
-				(sort_record*) run->run_end_buffer)
+			if ((record = (sort_record*) run->run_record) < (sort_record*) run->run_end_buffer)
 			{
 				run->run_record = reinterpret_cast<sort_record*>(NEXT_RUN_RECORD(run->run_record));
 				--run->run_records;

@@ -1046,10 +1035,10 @@ sort_record* Sort::getMerge(merge_control* merge)
 			// There are records remaining, but the buffer is full.
 			// Read a buffer full.

-			l = (ULONG) ((BLOB_PTR*) run->run_end_buffer - (BLOB_PTR*) run->run_buffer);
+			l = (ULONG) (run->run_end_buffer - run->run_buffer);
 			n = run->run_records * m_longs * sizeof(ULONG);
 			l = MIN(l, n);
-			run->run_seek = readBlock(m_space, run->run_seek, (UCHAR*) run->run_buffer, l);
+			run->run_seek = readBlock(m_space, run->run_seek, run->run_buffer, l);

 			record = reinterpret_cast<sort_record*>(run->run_buffer);
 			run->run_record =

@@ -1189,21 +1178,24 @@ void Sort::init()
 		m_runs->run_depth == MAX_MERGE_LEVEL)
 	{
 		const ULONG mem_size = MAX_SORT_BUFFER_SIZE * RUN_GROUP;
-		void* const mem = m_owner->getPool().allocate_nothrow(mem_size);
-		if (mem)
+
+		try
 		{
-			m_owner->getPool().deallocate(m_memory);
-			m_memory = (SORTP*) mem;
+			UCHAR* const mem = FB_NEW(m_owner->getPool()) UCHAR[mem_size];
+
+			delete[] m_memory;
+			m_memory = mem;
 			m_size_memory = mem_size;
-			m_end_memory = (SORTP*) ((BLOB_PTR*) m_memory + m_size_memory);
+			m_end_memory = m_memory + m_size_memory;
 			m_first_pointer = (sort_record**) m_memory;

 			for (run_control *run = m_runs; run; run = run->run_next)
 				run->run_depth--;
 		}
+		catch (const BadAlloc&)
+		{} // no-op
 	}

 	m_next_pointer = m_first_pointer;

@@ -1252,15 +1244,15 @@ ULONG Sort::allocate(ULONG n, ULONG chunkSize, bool useFreeSpace)
 	// if some run's already in memory cache - use this memory
 	for (run = m_runs, count = 0; count < n; run = run->run_next, count++)
 	{
-		run->run_buffer = 0;
+		run->run_buffer = NULL;

-		char* mem = 0;
-		if (mem = m_space->inMemory(run->run_seek, run->run_size))
+		UCHAR* mem = NULL;
+		if ( (mem = m_space->inMemory(run->run_seek, run->run_size)) )
 		{
-			run->run_buffer = reinterpret_cast<SORTP*>(mem);
+			run->run_buffer = mem;
 			run->run_record = reinterpret_cast<sort_record*>(mem);
 			mem += run->run_size;
-			run->run_end_buffer = reinterpret_cast<SORTP*>(mem);
+			run->run_end_buffer = mem;
 			run->run_seek += run->run_size; // emulate read
 			allocated++;
 		}

@@ -1284,14 +1276,14 @@ ULONG Sort::allocate(ULONG n, ULONG chunkSize, bool useFreeSpace)
 		if (!run->run_buffer)
 		{
 			const size_t runSize = MIN(seg->size / rec_size, run->run_records) * rec_size;
-			char* mem = seg->memory;
+			UCHAR* mem = seg->memory;
 			run->run_mem_seek = seg->position;
 			run->run_mem_size = (ULONG) seg->size;
-			run->run_buffer = reinterpret_cast<SORTP*>(mem);
+			run->run_buffer = mem;
 			mem += runSize;
 			run->run_record = reinterpret_cast<sort_record*>(mem);
-			run->run_end_buffer = reinterpret_cast<SORTP*>(mem);
+			run->run_end_buffer = mem;

 			seg++;
 			if (seg == lastSeg)

@@ -1325,11 +1317,11 @@ void Sort::mergeRuns(USHORT n)
 	// space requirements, and filling in a vector of streams with run pointers

 	const USHORT rec_size = m_longs << SHIFTLONG;
-	BLOB_PTR* buffer = (BLOB_PTR*) m_first_pointer;
+	UCHAR* buffer = (UCHAR*) m_first_pointer;
 	run_control temp_run;
 	memset(&temp_run, 0, sizeof(run_control));

-	temp_run.run_end_buffer = (SORTP*) (buffer + (m_size_memory / rec_size) * rec_size);
+	temp_run.run_end_buffer = buffer + (m_size_memory / rec_size) * rec_size;
 	temp_run.run_size = 0;
 	temp_run.run_buff_alloc = false;

@@ -1364,25 +1356,24 @@ void Sort::mergeRuns(USHORT n)
 		{
 			if (!run->run_buff_alloc)
 			{
-				run->run_buffer = (ULONG*) m_owner->getPool().allocate(rec_size * 2);
+				run->run_buffer = FB_NEW(m_owner->getPool()) UCHAR[rec_size * 2];
 				run->run_buff_alloc = true;
 			}
-			run->run_end_buffer =
-				reinterpret_cast<ULONG*>((BLOB_PTR*) run->run_buffer + (rec_size * 2));
+			run->run_end_buffer = run->run_buffer + (rec_size * 2);
 			run->run_record = reinterpret_cast<sort_record*>(run->run_end_buffer);
 		}
 		else
 		{
-			run->run_buffer = (ULONG*) buffer;
+			run->run_buffer = buffer;
 			buffer += size;
-			run->run_record =
-				reinterpret_cast<sort_record*>(run->run_end_buffer = (ULONG*) buffer);
+			run->run_end_buffer = buffer;
+			run->run_record = reinterpret_cast<sort_record*>(run->run_end_buffer);
 		}

 		temp_run.run_size += run->run_size;
 	}

 	temp_run.run_record = reinterpret_cast<sort_record*>(buffer);
-	temp_run.run_buffer = reinterpret_cast<ULONG*>(temp_run.run_record);
+	temp_run.run_buffer = reinterpret_cast<UCHAR*>(temp_run.run_record);
 	temp_run.run_buff_cache = false;

 	// Build merge tree bottom up.

@@ -1437,8 +1428,8 @@ void Sort::mergeRuns(USHORT n)
 	{
 		if (q >= (sort_record*) temp_run.run_end_buffer)
 		{
-			size = (BLOB_PTR*) q - (BLOB_PTR*) temp_run.run_buffer;
-			seek = writeBlock(m_space, seek, (UCHAR*) temp_run.run_buffer, size);
+			size = (UCHAR*) q - temp_run.run_buffer;
+			seek = writeBlock(m_space, seek, temp_run.run_buffer, size);
 			q = reinterpret_cast<sort_record*>(temp_run.run_buffer);
 		}
 		count = m_longs;

@@ -1450,8 +1441,8 @@ void Sort::mergeRuns(USHORT n)
 	// Write the tail of the new run and return any unused space

-	if ( (size = (BLOB_PTR*) q - (BLOB_PTR*) temp_run.run_buffer) )
-		seek = writeBlock(m_space, seek, (UCHAR*) temp_run.run_buffer, size);
+	if ( (size = (UCHAR*) q - temp_run.run_buffer) )
+		seek = writeBlock(m_space, seek, temp_run.run_buffer, size);

 	// If the records did not fill the allocated run (such as when duplicates are
 	// rejected), then free the remainder and diminish the size of the run accordingly

@@ -1493,7 +1484,7 @@ void Sort::mergeRuns(USHORT n)
 		m_free_runs = run->run_next;
 		if (run->run_buff_alloc)
 		{
-			delete (UCHAR*) run->run_buffer;
+			delete[] run->run_buffer;
 			run->run_buff_alloc = false;
 		}
 		temp_run.run_header.rmh_type = RMH_TYPE_RUN;

@@ -1766,7 +1757,7 @@ void Sort::orderAndSave()
 	run->run_size = run->run_records * key_length;
 	run->run_seek = m_space->allocateSpace(run->run_size);

-	char* mem = m_space->inMemory(run->run_seek, run->run_size);
+	UCHAR* mem = m_space->inMemory(run->run_seek, run->run_size);

 	if (mem)
 	{

sort.h

@@ -192,8 +192,8 @@ struct run_control
 	FB_UINT64 run_seek;			// Offset in file of run
 	FB_UINT64 run_size;			// Length of run in work file
 	sort_record* run_record;	// Next record in run
-	SORTP* run_buffer;			// Run buffer
-	SORTP* run_end_buffer;		// End of buffer
+	UCHAR* run_buffer;			// Run buffer
+	UCHAR* run_end_buffer;		// End of buffer
 	bool run_buff_alloc;		// Allocated buffer flag
 	bool run_buff_cache;		// run buffer is already in cache
 	FB_UINT64 run_mem_seek;		// position of run's buffer in in-memory part of sort file

@@ -228,14 +228,14 @@ public:
 	void put(Jrd::thread_db*, ULONG**);
 	void sort(Jrd::thread_db*);

-	static FB_UINT64 readBlock(TempSpace* space, FB_UINT64 seek, BLOB_PTR* address, ULONG length)
+	static FB_UINT64 readBlock(TempSpace* space, FB_UINT64 seek, UCHAR* address, ULONG length)
 	{
 		const size_t bytes = space->read(seek, address, length);
 		fb_assert(bytes == length);
 		return seek + bytes;
 	}

-	static FB_UINT64 writeBlock(TempSpace* space, FB_UINT64 seek, BLOB_PTR* address, ULONG length)
+	static FB_UINT64 writeBlock(TempSpace* space, FB_UINT64 seek, UCHAR* address, ULONG length)
 	{
 		const size_t bytes = space->write(seek, address, length);
 		fb_assert(bytes == length);

@@ -262,8 +262,8 @@ private:
 	Database* m_dbb;				// Database
 	SortOwner* m_owner;				// Sort owner

-	SORTP* m_memory;				// ALLOC: Memory for sort
-	SORTP* m_end_memory;			// End of memory
+	UCHAR* m_memory;				// ALLOC: Memory for sort
+	UCHAR* m_end_memory;			// End of memory
 	ULONG m_size_memory;			// Bytes allocated
 	SR* m_last_record;				// Address of last record
 	sort_record** m_first_pointer;	// Memory for sort