mirror of https://github.com/FirebirdSQL/firebird.git synced 2025-01-24 06:43:03 +01:00

Bring some consistency into the memory management.

Reduce number of casts for memory buffers.
Use UCHAR instead of confusing BLOB_PTR.
dimitr 2010-03-19 10:54:53 +00:00
parent d7de6b920d
commit 168032571e
4 changed files with 79 additions and 88 deletions
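Before the per-file diffs, here is a minimal standalone sketch of the pattern this commit applies. The Buffer struct, plain new[]/delete[] and std::memcpy below are stand-ins for illustration only (not Firebird's MemoryPool/FB_NEW API or the real TempSpace/Sort classes); the point is that holding raw byte buffers as UCHAR* lets pointer arithmetic and byte copies work without BLOB_PTR/SORTP casts, and pairs array allocation with delete[].

#include <cstddef>
#include <cstring>

typedef unsigned char UCHAR;     // Firebird's UCHAR is a plain unsigned byte

// Hypothetical byte buffer used only to illustrate the commit's pattern.
struct Buffer
{
    UCHAR* ptr;      // before: char* / SORTP*, which forced casts at every use
    size_t size;

    explicit Buffer(size_t length)
        : ptr(new UCHAR[length]),    // before: (SORTP*) pool.allocate(length)
          size(length)
    {}

    ~Buffer()
    {
        delete[] ptr;                // before: delete ptr / pool.deallocate(ptr)
    }

    // Byte-granular copy works directly on UCHAR* -- no BLOB_PTR casts needed
    size_t write(size_t offset, const void* data, size_t length)
    {
        if (offset >= size)
            return 0;
        const UCHAR* const src = static_cast<const UCHAR*>(data);
        const size_t n = (length < size - offset) ? length : size - offset;
        std::memcpy(ptr + offset, src, n);
        return n;
    }

    UCHAR* end() const
    {
        return ptr + size;           // before: (SORTP*) ((BLOB_PTR*) ptr + size)
    }
};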

View File

@ -65,7 +65,7 @@ TempSpace::Block::Block(Block* tail, size_t length)
TempSpace::MemoryBlock::MemoryBlock(MemoryPool& pool, Block* tail, size_t length)
: Block(tail, length)
{
ptr = FB_NEW(pool) char[length];
ptr = FB_NEW(pool) UCHAR[length];
}
TempSpace::MemoryBlock::~MemoryBlock()
@ -212,7 +212,7 @@ size_t TempSpace::read(offset_t offset, void* buffer, size_t length)
// search for the first needed block
Block* block = findBlock(offset);
char* p = static_cast<char*>(buffer);
UCHAR* p = static_cast<UCHAR*>(buffer);
size_t l = length;
// read data from the block chain
@ -250,7 +250,7 @@ size_t TempSpace::write(offset_t offset, const void* buffer, size_t length)
// search for the first needed block
Block* const block = findBlock(offset);
const char* p = static_cast<const char*>(buffer);
const UCHAR* p = static_cast<const UCHAR*>(buffer);
size_t l = length;
// write data to as many blocks as necessary
@ -522,7 +522,7 @@ void TempSpace::releaseSpace(offset_t position, size_t size)
// Return contiguous chunk of memory if present at given location
//
char* TempSpace::inMemory(offset_t begin, size_t size) const
UCHAR* TempSpace::inMemory(offset_t begin, size_t size) const
{
const Block* block = findBlock(begin);
return block ? block->inMemory(begin, size) : NULL;
@ -535,7 +535,7 @@ char* TempSpace::inMemory(offset_t begin, size_t size) const
// of search range if found
//
char* TempSpace::findMemory(offset_t& begin, offset_t end, size_t size) const
UCHAR* TempSpace::findMemory(offset_t& begin, offset_t end, size_t size) const
{
offset_t local_offset = begin;
const offset_t save_begin = begin;
@ -543,7 +543,7 @@ char* TempSpace::findMemory(offset_t& begin, offset_t end, size_t size) const
while (block && (begin + size <= end))
{
char* mem = block->inMemory(local_offset, size);
UCHAR* mem = block->inMemory(local_offset, size);
if (mem)
{
return mem;
@ -611,14 +611,14 @@ size_t TempSpace::allocateBatch(size_t count, size_t minSize, size_t maxSize, Se
offset_t freeEnd = freeSpace ? freeSpace->position + freeSpace->size : 0;
while (segments.getCount() < count && freeSpace)
{
char* mem = findMemory(freeSeek, freeEnd, freeMem);
UCHAR* mem = findMemory(freeSeek, freeEnd, freeMem);
if (mem)
{
fb_assert(freeSeek + freeMem <= freeEnd);
#ifdef DEV_BUILD
offset_t seek1 = freeSeek;
char* p = findMemory(seek1, freeEnd, freeMem);
UCHAR* p = findMemory(seek1, freeEnd, freeMem);
fb_assert(p == mem);
fb_assert(seek1 == freeSeek);
#endif

View File

@ -52,11 +52,11 @@ public:
offset_t allocateSpace(size_t size);
void releaseSpace(offset_t offset, size_t size);
char* inMemory(offset_t offset, size_t size) const;
UCHAR* inMemory(offset_t offset, size_t size) const;
struct SegmentInMemory
{
char* memory;
UCHAR* memory;
offset_t position;
size_t size;
};
@ -78,7 +78,7 @@ private:
virtual size_t read(offset_t offset, void* buffer, size_t length) = 0;
virtual size_t write(offset_t offset, const void* buffer, size_t length) = 0;
virtual char* inMemory(offset_t offset, size_t size) const = 0;
virtual UCHAR* inMemory(offset_t offset, size_t size) const = 0;
virtual bool sameFile(const Firebird::TempFile* file) const = 0;
Block *prev;
@ -95,7 +95,7 @@ private:
size_t read(offset_t offset, void* buffer, size_t length);
size_t write(offset_t offset, const void* buffer, size_t length);
char* inMemory(offset_t offset, size_t _size) const
UCHAR* inMemory(offset_t offset, size_t _size) const
{
if ((offset < this->size) && (offset + _size <= this->size))
return ptr + offset;
@ -109,7 +109,7 @@ private:
}
private:
char* ptr;
UCHAR* ptr;
};
class FileBlock : public Block
@ -121,7 +121,7 @@ private:
size_t read(offset_t offset, void* buffer, size_t length);
size_t write(offset_t offset, const void* buffer, size_t length);
char* inMemory(offset_t /*offset*/, size_t /*a_size*/) const
UCHAR* inMemory(offset_t /*offset*/, size_t /*a_size*/) const
{
return NULL;
}
@ -144,7 +144,7 @@ private:
return false;
}
char* findMemory(offset_t& begin, offset_t end, size_t size) const;
UCHAR* findMemory(offset_t& begin, offset_t end, size_t size) const;
// free/used segments management
class Segment

View File

@ -224,7 +224,7 @@ Sort::Sort(Database* dbb,
#ifdef DEBUG_MERGE
// To debug the merge algorithm, force the in-memory pool to be VERY small
m_size_memory = 2000;
m_memory = (SORTP*) pool.allocate(m_size_memory);
m_memory = FB_NEW(pool) UCHAR[m_size_memory];
#else
// Try to get a big chunk of memory, if we can't try smaller and
// smaller chunks until we can get the memory. If we get down to
@ -236,7 +236,7 @@ Sort::Sort(Database* dbb,
{
try
{
m_memory = (SORTP*) pool.allocate(m_size_memory);
m_memory = FB_NEW(pool) UCHAR[m_size_memory];
break;
}
catch (const BadAlloc&)
@ -249,7 +249,7 @@ Sort::Sort(Database* dbb,
}
#endif // DEBUG_MERGE
m_end_memory = (SORTP*) ((BLOB_PTR*) m_memory + m_size_memory);
m_end_memory = m_memory + m_size_memory;
m_first_pointer = (sort_record**) m_memory;
// Set up the temp space
@ -291,7 +291,7 @@ Sort::~Sort()
// If runs are allocated and not in the big block, release them.
// Then release the big block.
delete m_memory;
delete[] m_memory;
// Clean up the runs that were used
@ -300,7 +300,7 @@ Sort::~Sort()
{
m_runs = run->run_next;
if (run->run_buff_alloc)
delete (UCHAR*) run->run_buffer;
delete[] run->run_buffer;
delete run;
}
@ -310,11 +310,11 @@ Sort::~Sort()
{
m_free_runs = run->run_next;
if (run->run_buff_alloc)
delete (UCHAR*) run->run_buffer;
delete[] run->run_buffer;
delete run;
}
delete m_merge_pool;
delete[] m_merge_pool;
}
@ -403,8 +403,8 @@ void Sort::put(thread_db* tdbb, ULONG** record_address)
// Check that we are not at the beginning of the buffer in addition
// to checking for space for the record. This avoids the pointer
// record from underflowing in the second condition.
if ((BLOB_PTR*) record < (BLOB_PTR*) (m_memory + m_longs) ||
(BLOB_PTR*) NEXT_RECORD(record) <= (BLOB_PTR*) (m_next_pointer + 1))
if ((UCHAR*) record < m_memory + m_longs ||
(UCHAR*) NEXT_RECORD(record) <= (UCHAR*) (m_next_pointer + 1))
{
putRun();
while (true)
@ -514,14 +514,13 @@ void Sort::sort(thread_db* tdbb)
{
if (run->run_buff_alloc)
{
delete (UCHAR*) run->run_buffer;
delete[] run->run_buffer;
run->run_buff_alloc = false;
}
++run_count;
}
run_merge_hdr** streams =
(run_merge_hdr**) m_owner->getPool().allocate(run_count * sizeof(run_merge_hdr*));
AutoPtr<run_merge_hdr*> streams(FB_NEW(m_owner->getPool()) run_merge_hdr*[run_count]);
run_merge_hdr** m1 = streams;
for (run = m_runs; run; run = run->run_next)
@ -534,24 +533,15 @@ void Sort::sort(thread_db* tdbb)
if (count > 1)
{
fb_assert(!m_merge_pool); // shouldn't have a pool
try
{
m_merge_pool =
(merge_control*) m_owner->getPool().allocate((count - 1) * sizeof(merge_control));
m_merge_pool = FB_NEW(m_owner->getPool()) merge_control[count - 1];
merge_pool = m_merge_pool;
memset(merge_pool, 0, (count - 1) * sizeof(merge_control));
}
catch (const BadAlloc&)
{
delete streams;
throw;
}
}
else
{
// Merge of 1 or 0 runs doesn't make sense
fb_assert(false); // We really shouldn't get here
merge = (merge_control*) * streams; // But if we do...
merge = (merge_control*) *streams; // But if we do...
}
// Each pass through the vector builds a level of the merge tree
@ -596,7 +586,7 @@ void Sort::sort(thread_db* tdbb)
count = m2 - streams;
}
delete streams;
streams.reset();
merge->mrg_header.rmh_parent = NULL;
m_merge = merge;
@ -619,25 +609,25 @@ void Sort::sort(thread_db* tdbb)
if (!run->run_buffer)
{
int mem_size = MIN(allocSize / rec_size, run->run_records) * rec_size;
char* mem = NULL;
UCHAR* mem = NULL;
try
{
mem = (char*) m_owner->getPool().allocate(mem_size);
mem = FB_NEW(m_owner->getPool()) UCHAR[mem_size];
}
catch (const BadAlloc&)
{
mem_size = (mem_size / (2 * rec_size)) * rec_size;
if (!mem_size)
throw;
mem = (char*) m_owner->getPool().allocate(mem_size);
mem = FB_NEW(m_owner->getPool()) UCHAR[mem_size];
}
run->run_buff_alloc = true;
run->run_buff_cache = false;
run->run_buffer = reinterpret_cast<SORTP*>(mem);
run->run_buffer = mem;
mem += mem_size;
run->run_record = reinterpret_cast<sort_record*>(mem);
run->run_end_buffer = reinterpret_cast<SORTP*> (mem);
run->run_end_buffer = mem;
}
}
}
@ -803,7 +793,7 @@ void Sort::diddleKey(UCHAR* record, bool direction)
for (sort_key_def* key = m_description.begin(), *end = m_description.end(); key < end; key++)
{
BLOB_PTR* p = (BLOB_PTR*) record + key->skd_offset;
UCHAR* p = (UCHAR*) record + key->skd_offset;
USHORT* wp = (USHORT*) p;
SORTP* lwp = (SORTP*) p;
USHORT complement = key->skd_flags & SKD_descending;
@ -875,7 +865,7 @@ void Sort::diddleKey(UCHAR* record, bool direction)
*p = c1;
p += 3;
}
p = (BLOB_PTR*) wp;
p = (UCHAR*) wp;
break;
case SKD_short:
@ -983,13 +973,13 @@ void Sort::diddleKey(UCHAR* record, bool direction)
if (key->skd_dtype == SKD_varying && !direction)
{
p = (BLOB_PTR*) record + key->skd_offset;
p = (UCHAR*) record + key->skd_offset;
((vary*) p)->vary_length = *((USHORT*) (record + key->skd_vary_offset));
}
if (key->skd_dtype == SKD_cstring && !direction)
{
p = (BLOB_PTR*) record + key->skd_offset;
p = (UCHAR*) record + key->skd_offset;
USHORT l = *((USHORT*) (record + key->skd_vary_offset));
*(p + l) = 0;
}
@ -1035,8 +1025,7 @@ sort_record* Sort::getMerge(merge_control* merge)
// Find the appropriate record in the buffer to return
if ((record = (sort_record*) run->run_record) <
(sort_record*) run->run_end_buffer)
if ((record = (sort_record*) run->run_record) < (sort_record*) run->run_end_buffer)
{
run->run_record = reinterpret_cast<sort_record*>(NEXT_RUN_RECORD(run->run_record));
--run->run_records;
@ -1046,10 +1035,10 @@ sort_record* Sort::getMerge(merge_control* merge)
// There are records remaining, but the buffer is full.
// Read a buffer full.
l = (ULONG) ((BLOB_PTR*) run->run_end_buffer - (BLOB_PTR*) run->run_buffer);
l = (ULONG) (run->run_end_buffer - run->run_buffer);
n = run->run_records * m_longs * sizeof(ULONG);
l = MIN(l, n);
run->run_seek = readBlock(m_space, run->run_seek, (UCHAR*) run->run_buffer, l);
run->run_seek = readBlock(m_space, run->run_seek, run->run_buffer, l);
record = reinterpret_cast<sort_record*>(run->run_buffer);
run->run_record =
@ -1189,21 +1178,24 @@ void Sort::init()
m_runs->run_depth == MAX_MERGE_LEVEL)
{
const ULONG mem_size = MAX_SORT_BUFFER_SIZE * RUN_GROUP;
void* const mem = m_owner->getPool().allocate_nothrow(mem_size);
if (mem)
try
{
m_owner->getPool().deallocate(m_memory);
UCHAR* const mem = FB_NEW(m_owner->getPool()) UCHAR[mem_size];
m_memory = (SORTP*) mem;
delete[] m_memory;
m_memory = mem;
m_size_memory = mem_size;
m_end_memory = (SORTP*) ((BLOB_PTR*) m_memory + m_size_memory);
m_end_memory = m_memory + m_size_memory;
m_first_pointer = (sort_record**) m_memory;
for (run_control *run = m_runs; run; run = run->run_next)
run->run_depth--;
}
catch (const BadAlloc&)
{} // no-op
}
m_next_pointer = m_first_pointer;
@ -1252,15 +1244,15 @@ ULONG Sort::allocate(ULONG n, ULONG chunkSize, bool useFreeSpace)
// if some run's already in memory cache - use this memory
for (run = m_runs, count = 0; count < n; run = run->run_next, count++)
{
run->run_buffer = 0;
run->run_buffer = NULL;
char* mem = 0;
if (mem = m_space->inMemory(run->run_seek, run->run_size))
UCHAR* mem = NULL;
if ( (mem = m_space->inMemory(run->run_seek, run->run_size)) )
{
run->run_buffer = reinterpret_cast<SORTP*>(mem);
run->run_buffer = mem;
run->run_record = reinterpret_cast<sort_record*>(mem);
mem += run->run_size;
run->run_end_buffer = reinterpret_cast<SORTP*>(mem);
run->run_end_buffer = mem;
run->run_seek += run->run_size; // emulate read
allocated++;
}
@ -1284,14 +1276,14 @@ ULONG Sort::allocate(ULONG n, ULONG chunkSize, bool useFreeSpace)
if (!run->run_buffer)
{
const size_t runSize = MIN(seg->size / rec_size, run->run_records) * rec_size;
char* mem = seg->memory;
UCHAR* mem = seg->memory;
run->run_mem_seek = seg->position;
run->run_mem_size = (ULONG) seg->size;
run->run_buffer = reinterpret_cast<SORTP*>(mem);
run->run_buffer = mem;
mem += runSize;
run->run_record = reinterpret_cast<sort_record*>(mem);
run->run_end_buffer = reinterpret_cast<SORTP*>(mem);
run->run_end_buffer = mem;
seg++;
if (seg == lastSeg)
@ -1325,11 +1317,11 @@ void Sort::mergeRuns(USHORT n)
// space requirements, and filling in a vector of streams with run pointers
const USHORT rec_size = m_longs << SHIFTLONG;
BLOB_PTR* buffer = (BLOB_PTR*) m_first_pointer;
UCHAR* buffer = (UCHAR*) m_first_pointer;
run_control temp_run;
memset(&temp_run, 0, sizeof(run_control));
temp_run.run_end_buffer = (SORTP*) (buffer + (m_size_memory / rec_size) * rec_size);
temp_run.run_end_buffer = buffer + (m_size_memory / rec_size) * rec_size;
temp_run.run_size = 0;
temp_run.run_buff_alloc = false;
@ -1364,25 +1356,24 @@ void Sort::mergeRuns(USHORT n)
{
if (!run->run_buff_alloc)
{
run->run_buffer = (ULONG*) m_owner->getPool().allocate(rec_size * 2);
run->run_buffer = FB_NEW(m_owner->getPool()) UCHAR[rec_size * 2];
run->run_buff_alloc = true;
}
run->run_end_buffer =
reinterpret_cast<ULONG*>((BLOB_PTR*) run->run_buffer + (rec_size * 2));
run->run_end_buffer = run->run_buffer + (rec_size * 2);
run->run_record = reinterpret_cast<sort_record*>(run->run_end_buffer);
}
else
{
run->run_buffer = (ULONG*) buffer;
run->run_buffer = buffer;
buffer += size;
run->run_record =
reinterpret_cast<sort_record*>(run->run_end_buffer = (ULONG*) buffer);
run->run_end_buffer = buffer;
run->run_record = reinterpret_cast<sort_record*>(run->run_end_buffer);
}
}
temp_run.run_size += run->run_size;
}
temp_run.run_record = reinterpret_cast<sort_record*>(buffer);
temp_run.run_buffer = reinterpret_cast<ULONG*>(temp_run.run_record);
temp_run.run_buffer = reinterpret_cast<UCHAR*>(temp_run.run_record);
temp_run.run_buff_cache = false;
// Build merge tree bottom up.
@ -1437,8 +1428,8 @@ void Sort::mergeRuns(USHORT n)
{
if (q >= (sort_record*) temp_run.run_end_buffer)
{
size = (BLOB_PTR*) q - (BLOB_PTR*) temp_run.run_buffer;
seek = writeBlock(m_space, seek, (UCHAR*) temp_run.run_buffer, size);
size = (UCHAR*) q - temp_run.run_buffer;
seek = writeBlock(m_space, seek, temp_run.run_buffer, size);
q = reinterpret_cast<sort_record*>(temp_run.run_buffer);
}
count = m_longs;
@ -1450,8 +1441,8 @@ void Sort::mergeRuns(USHORT n)
// Write the tail of the new run and return any unused space
if ( (size = (BLOB_PTR*) q - (BLOB_PTR*) temp_run.run_buffer) )
seek = writeBlock(m_space, seek, (UCHAR*) temp_run.run_buffer, size);
if ( (size = (UCHAR*) q - temp_run.run_buffer) )
seek = writeBlock(m_space, seek, temp_run.run_buffer, size);
// If the records did not fill the allocated run (such as when duplicates are
// rejected), then free the remainder and diminish the size of the run accordingly
@ -1493,7 +1484,7 @@ void Sort::mergeRuns(USHORT n)
m_free_runs = run->run_next;
if (run->run_buff_alloc)
{
delete (UCHAR*) run->run_buffer;
delete[] run->run_buffer;
run->run_buff_alloc = false;
}
temp_run.run_header.rmh_type = RMH_TYPE_RUN;
@ -1766,7 +1757,7 @@ void Sort::orderAndSave()
run->run_size = run->run_records * key_length;
run->run_seek = m_space->allocateSpace(run->run_size);
char* mem = m_space->inMemory(run->run_seek, run->run_size);
UCHAR* mem = m_space->inMemory(run->run_seek, run->run_size);
if (mem)
{

View File

@ -192,8 +192,8 @@ struct run_control
FB_UINT64 run_seek; // Offset in file of run
FB_UINT64 run_size; // Length of run in work file
sort_record* run_record; // Next record in run
SORTP* run_buffer; // Run buffer
SORTP* run_end_buffer; // End of buffer
UCHAR* run_buffer; // Run buffer
UCHAR* run_end_buffer; // End of buffer
bool run_buff_alloc; // Allocated buffer flag
bool run_buff_cache; // run buffer is already in cache
FB_UINT64 run_mem_seek; // position of run's buffer in in-memory part of sort file
@ -228,14 +228,14 @@ public:
void put(Jrd::thread_db*, ULONG**);
void sort(Jrd::thread_db*);
static FB_UINT64 readBlock(TempSpace* space, FB_UINT64 seek, BLOB_PTR* address, ULONG length)
static FB_UINT64 readBlock(TempSpace* space, FB_UINT64 seek, UCHAR* address, ULONG length)
{
const size_t bytes = space->read(seek, address, length);
fb_assert(bytes == length);
return seek + bytes;
}
static FB_UINT64 writeBlock(TempSpace* space, FB_UINT64 seek, BLOB_PTR* address, ULONG length)
static FB_UINT64 writeBlock(TempSpace* space, FB_UINT64 seek, UCHAR* address, ULONG length)
{
const size_t bytes = space->write(seek, address, length);
fb_assert(bytes == length);
@ -262,8 +262,8 @@ private:
Database* m_dbb; // Database
SortOwner* m_owner; // Sort owner
SORTP* m_memory; // ALLOC: Memory for sort
SORTP* m_end_memory; // End of memory
UCHAR* m_memory; // ALLOC: Memory for sort
UCHAR* m_end_memory; // End of memory
ULONG m_size_memory; // Bytes allocated
SR* m_last_record; // Address of last record
sort_record** m_first_pointer; // Memory for sort