
Memory manager changes - added memory usage stats, empty extents are now deallocated

skidder 2003-01-18 21:45:24 +00:00
parent d0146ce67c
commit 884c38d6c4
10 changed files with 414 additions and 293 deletions
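
The change, in outline: every pool keeps a used_memory total for its own blocks and optionally forwards allocations and frees to a pair of external counters (cur_memory / max_memory) that default to the process-wide process_current_memory / process_max_memory, or point at a database's dbb_current_memory / dbb_max_memory under SUPERSERVER. A minimal, self-contained sketch of that accounting (class and member names here are illustrative, not the Firebird code itself):

    #include <cstddef>
    #include <cstdio>

    // Process-wide totals, mirroring process_current_memory / process_max_memory.
    static int process_cur = 0;
    static int process_max = 0;

    // Illustrative stand-in for the accounting added to MemoryPool.
    class StatsPool {
    public:
        explicit StatsPool(int *cur = &process_cur, int *max = &process_max)
            : used_memory(0), cur_memory(cur), max_memory(max) {}

        void account_alloc(std::size_t bytes) {
            used_memory += (int) bytes;
            if (cur_memory) {
                *cur_memory += (int) bytes;
                if (max_memory && *max_memory < *cur_memory)
                    *max_memory = *cur_memory;        // track the high-water mark
            }
        }

        void account_free(std::size_t bytes) {
            used_memory -= (int) bytes;
            if (cur_memory)
                *cur_memory -= (int) bytes;
        }

    private:
        int used_memory;   // bytes owned by this pool, including block headers
        int *cur_memory;   // external "current" counter, possibly shared by many pools
        int *max_memory;   // external peak counter
    };

    int main() {
        int db_cur = 0, db_max = 0;
        StatsPool pool(&db_cur, &db_max);
        pool.account_alloc(4096);
        pool.account_free(1024);
        std::printf("cur=%d max=%d\n", db_cur, db_max);   // prints cur=3072 max=4096
    }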

View File

@ -54,6 +54,9 @@
namespace Firebird {
int process_max_memory = 0;
int process_current_memory = 0;
// Helper function to reduce code size, since many compilers
// generate quite a bit of code at the point of the throw.
static void pool_out_of_memory()
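
The comment above pool_out_of_memory() explains the motivation: a throw expression expands into a fair amount of exception-setup code, so funnelling every out-of-memory path through one out-of-line helper keeps the allocation fast paths small. A hedged sketch of the pattern (std::bad_alloc stands in for Firebird's own exception class):

    #include <cstddef>
    #include <cstdlib>
    #include <new>

    // Out-of-line throw helper: call sites pay for a call instruction only,
    // not for constructing and raising the exception inline.
    [[noreturn]] static void pool_out_of_memory()
    {
        throw std::bad_alloc();
    }

    static void* checked_alloc(std::size_t size)
    {
        void* p = std::malloc(size);
        if (!p)
            pool_out_of_memory();   // cold path shared by all allocation sites
        return p;
    }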
@ -227,7 +230,7 @@ void MemoryPool::print_contents(IB_FILE *file, bool used_only) {
lock.leave();
}
MemoryPool* MemoryPool::internal_create(size_t instance_size) {
MemoryPool* MemoryPool::internal_create(size_t instance_size, int *cur_mem, int *max_mem) {
size_t alloc_size = FB_MAX(
// This is the exact initial layout of memory pool in the first extent //
MEM_ALIGN(sizeof(MemoryExtent)) +
@ -250,7 +253,10 @@ MemoryPool* MemoryPool::internal_create(size_t instance_size) {
MEM_ALIGN(sizeof(MemoryExtent)) +
MEM_ALIGN(sizeof(MemoryBlock)) +
MEM_ALIGN(instance_size) +
MEM_ALIGN(sizeof(MemoryBlock)));
MEM_ALIGN(sizeof(MemoryBlock)),
cur_mem, max_mem);
pool->extents_memory = alloc_size - MEM_ALIGN(sizeof(MemoryExtent));
MemoryBlock *poolBlk = (MemoryBlock*) (mem+MEM_ALIGN(sizeof(MemoryExtent)));
poolBlk->pool = pool;
@ -301,11 +307,14 @@ void MemoryPool::deletePool(MemoryPool* pool) {
2. The lock is copied before the extent that contains the pool
itself is freed, because otherwise it contains garbage. The
lock will be destroyed automatically at exit. */
/* skidder: Working with a copy of a spinlock or critical section is not
a correct operation. We simply need to destroy the object earlier */
#ifdef SUPERSERVER
Spinlock lock = pool->lock;
pool->lock.~Spinlock();
#else
SharedSpinlock lock = pool->lock;
pool->lock.~SharedSpinlock();
#endif
if (pool->cur_memory) *pool->cur_memory -= pool->used_memory;
// Delete all extents now
MemoryExtent *temp = pool->extents;
while (temp) {
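
The skidder comment above states the rule this hunk enforces: a spinlock or critical section cannot be used through a copy, so instead of copying the lock out of the doomed extent, deletePool now runs the lock's destructor in place and only afterwards releases the raw memory that held it. A generic sketch of that idiom (Lock and Pool are placeholders, not the Firebird classes):

    #include <cstdlib>
    #include <new>

    struct Lock {            // stand-in for Spinlock / SharedSpinlock
        Lock() {}
        ~Lock() {}           // must run while the underlying storage is still valid
    };

    struct Pool {
        Lock lock;
        // ... the rest of the pool header lives in its first extent ...
    };

    static void delete_pool(Pool* pool)
    {
        pool->lock.~Lock();  // end the lock's lifetime explicitly, in place
        std::free(pool);     // then the storage that contained it can go away
        // ~Pool() is deliberately not called; as in deletePool(), the header is
        // just a block inside an extent that gets freed wholesale.
    }

    int main()
    {
        void* raw = std::malloc(sizeof(Pool));
        Pool* pool = new (raw) Pool();   // pools are placement-constructed in their extent
        delete_pool(pool);
    }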
@ -323,25 +332,25 @@ void* MemoryPool::internal_alloc(size_t size, SSHORT type
// Look up a block greater than or equal to size in the freeBlocks tree
size = MEM_ALIGN(size);
BlockInfo temp = {NULL, size};
void *result;
MemoryBlock* blk;
if (freeBlocks.locate(locGreatEqual,temp)) {
// Found large enough block
BlockInfo* current = &freeBlocks.current();
if (current->length-size < MEM_ALIGN(sizeof(MemoryBlock))+ALLOC_ALIGNMENT) {
blk = current->block;
// Block is small enough to be returned AS IS
current->block->used = true;
current->block->type = type;
blk->used = true;
blk->type = type;
#ifdef DEBUG_GDS_ALLOC
current->block->file = file;
current->block->line = line;
blk->file = file;
blk->line = line;
#endif
result = (char *)current->block + MEM_ALIGN(sizeof(MemoryBlock));
freeBlocks.fastRemove();
} else {
// Cut a piece off the end of the block in the hope of avoiding structural
// modification of the free blocks tree
current->block->length -= MEM_ALIGN(sizeof(MemoryBlock))+size;
MemoryBlock *blk = (MemoryBlock *)((char*)current->block +
blk = (MemoryBlock *)((char*)current->block +
MEM_ALIGN(sizeof(MemoryBlock)) + current->block->length);
blk->pool = this;
blk->used = true;
@ -372,7 +381,6 @@ void* MemoryPool::internal_alloc(size_t size, SSHORT type
freeBlocks.fastRemove();
addFreeBlock(block);
}
result = (char*)blk+MEM_ALIGN(sizeof(MemoryBlock));
}
} else {
// If we are in a critically low memory condition, look for a block in a list
@ -394,6 +402,14 @@ void* MemoryPool::internal_alloc(size_t size, SSHORT type
prev->next = itr->next;
else
pendingFree = itr->next;
// We can do this without any locking because
// (1) integer += and -= operations are atomic on all current platforms
// (2) nobody will die if max_memory ends up a little imprecise
used_memory += temp->length + MEM_ALIGN(sizeof(MemoryBlock));
if (cur_memory) {
*cur_memory += temp->length + MEM_ALIGN(sizeof(MemoryBlock));
if (max_memory && *max_memory < *cur_memory) *max_memory = *cur_memory;
}
PATTERN_FILL(itr,size,ALLOC_PATTERN);
return itr;
} else {
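
The comment in this hunk justifies updating the shared counters without a lock: plain integer += / -= is assumed atomic on the supported platforms, and a slightly stale peak is acceptable. A sketch of the same "approximate peak" bookkeeping expressed with std::atomic and relaxed ordering, which makes that assumption explicit on modern compilers (an alternative formulation, not what the commit itself does):

    #include <atomic>
    #include <cstddef>

    static std::atomic<long> cur_memory{0};
    static std::atomic<long> max_memory{0};

    // Account for an allocation; under contention the recorded peak may lag
    // briefly, matching the "a little imprecise" tolerance described above.
    static void account_alloc(std::size_t bytes)
    {
        long cur = cur_memory.fetch_add((long) bytes, std::memory_order_relaxed)
                   + (long) bytes;
        long prev = max_memory.load(std::memory_order_relaxed);
        while (prev < cur &&
               !max_memory.compare_exchange_weak(prev, cur, std::memory_order_relaxed))
            ;   // retry until we publish cur or observe a larger peak
    }

    static void account_free(std::size_t bytes)
    {
        cur_memory.fetch_sub((long) bytes, std::memory_order_relaxed);
    }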
@ -401,7 +417,7 @@ void* MemoryPool::internal_alloc(size_t size, SSHORT type
// We don't need to modify the tree of free blocks or the list of
// pending free blocks in this case
temp->length -= MEM_ALIGN(sizeof(MemoryBlock))+size;
MemoryBlock *blk = (MemoryBlock *)((char*)temp +
blk = (MemoryBlock *)((char*)temp +
MEM_ALIGN(sizeof(MemoryBlock)) + temp->length);
blk->pool = this;
blk->used = true;
@ -416,7 +432,12 @@ void* MemoryPool::internal_alloc(size_t size, SSHORT type
blk->prev = temp;
if (!blk->last)
((MemoryBlock *)((char*)blk + MEM_ALIGN(sizeof(MemoryBlock)) + blk->length))->prev = blk;
result = (char *)blk + MEM_ALIGN(sizeof(MemoryBlock));
used_memory += blk->length + MEM_ALIGN(sizeof(MemoryBlock));
if (cur_memory) {
*cur_memory += blk->length + MEM_ALIGN(sizeof(MemoryBlock));
if (max_memory && *max_memory < *cur_memory) *max_memory = *cur_memory;
}
void *result = (char *)blk + MEM_ALIGN(sizeof(MemoryBlock));
PATTERN_FILL(result,size,ALLOC_PATTERN);
return result;
}
@ -430,10 +451,11 @@ void* MemoryPool::internal_alloc(size_t size, SSHORT type
if (!extent) {
return NULL;
}
extents_memory += alloc_size - MEM_ALIGN(sizeof(MemoryExtent));
extent->next = extents;
extents = extent;
MemoryBlock *blk = (MemoryBlock *)((char*)extent+MEM_ALIGN(sizeof(MemoryExtent)));
blk = (MemoryBlock *)((char*)extent+MEM_ALIGN(sizeof(MemoryExtent)));
blk->pool = this;
blk->used = true;
blk->type = type;
@ -460,8 +482,13 @@ void* MemoryPool::internal_alloc(size_t size, SSHORT type
rest->prev = blk;
addFreeBlock(rest);
}
result = (char*)blk+MEM_ALIGN(sizeof(MemoryBlock));
}
used_memory += blk->length + MEM_ALIGN(sizeof(MemoryBlock));
if (cur_memory) {
*cur_memory += blk->length + MEM_ALIGN(sizeof(MemoryBlock));
if (max_memory && *max_memory < *cur_memory) *max_memory = *cur_memory;
}
void *result = (char*)blk+MEM_ALIGN(sizeof(MemoryBlock));
// Grow spare blocks pool if necessary
PATTERN_FILL(result,size,ALLOC_PATTERN);
return result;
@ -506,12 +533,35 @@ void MemoryPool::removeFreeBlock(MemoryBlock *blk) {
}
}
void MemoryPool::free_blk_extent(MemoryBlock *blk) {
MemoryExtent *extent = (MemoryExtent *)((char *)blk-MEM_ALIGN(sizeof(MemoryExtent)));
MemoryExtent *itr = extents;
if (extents == extent)
extents = extents->next;
else
{
while ( itr ) {
MemoryExtent *next = itr->next;
if (next==extent) {
itr->next = extent->next;
break;
}
itr = next;
}
assert(itr); // The extent must be in the list somewhere
}
extents_memory -= blk->length + MEM_ALIGN(sizeof(MemoryBlock));
external_free(extent);
}
void MemoryPool::deallocate(void *block) {
if (!block) return;
lock.enter();
MemoryBlock *blk = (MemoryBlock *)((char*)block - MEM_ALIGN(sizeof(MemoryBlock))), *prev;
assert(blk->used);
assert(blk->pool==this);
used_memory -= blk->length + MEM_ALIGN(sizeof(MemoryBlock));
if (cur_memory) *cur_memory -= blk->length + MEM_ALIGN(sizeof(MemoryBlock));
// Try to merge block with preceding free block
if ((prev = blk->prev) && !prev->used) {
removeFreeBlock(prev);
@ -535,6 +585,9 @@ void MemoryPool::deallocate(void *block) {
}
}
PATTERN_FILL((char*)prev+MEM_ALIGN(sizeof(MemoryBlock)),prev->length,FREE_PATTERN);
if (!prev->prev && prev->last)
free_blk_extent(prev);
else
addFreeBlock(prev);
} else {
MemoryBlock *next;
@ -550,7 +603,10 @@ void MemoryPool::deallocate(void *block) {
if (!next->last)
((MemoryBlock *)((char *)next+MEM_ALIGN(sizeof(MemoryBlock))+next->length))->prev = blk;
}
PATTERN_FILL((char*)blk+MEM_ALIGN(sizeof(MemoryBlock)),blk->length,FREE_PATTERN);
PATTERN_FILL(block,blk->length,FREE_PATTERN);
if (!blk->prev && blk->last)
free_blk_extent(blk);
else
addFreeBlock(blk);
}
if (needSpare) updateSpare();
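
The new !prev->prev && prev->last and !blk->prev && blk->last tests are the "empty extents are now deallocated" half of the commit: once a freed block has been merged with its neighbours, having no predecessor and carrying the last flag means it is the only block left in its extent, so the whole extent goes back to the system via free_blk_extent() instead of returning to the free tree. A condensed sketch of that decision (field names follow the diff; the layout is simplified):

    // Sketch layout: every extent starts with an extent header, immediately
    // followed by the header of its first memory block.
    struct BlockSketch {
        BlockSketch* prev;   // NULL only for the first block in an extent
        bool last;           // set on the final block in an extent
        bool used;
        long length;
    };

    // After merging a freed block with adjacent free blocks, decide its fate:
    // no predecessor plus the "last" flag means the block spans the whole
    // extent, so nothing live remains there.
    static bool spans_whole_extent(const BlockSketch* blk)
    {
        return blk->prev == 0 && blk->last;
    }

    // if (spans_whole_extent(blk)) free_blk_extent(blk); else addFreeBlock(blk);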
@ -559,13 +615,13 @@ void MemoryPool::deallocate(void *block) {
} /* namespace Firebird */
#ifndef TESTING_ONLY
Firebird::MemoryPool* getDefaultMemoryPool() {
if (!Firebird::processMemoryPool) Firebird::processMemoryPool = MemoryPool::createPool();
return Firebird::processMemoryPool;
}
#ifndef TESTING_ONLY
extern "C" {
#ifdef DEBUG_GDS_ALLOC

View File

@ -81,11 +81,17 @@ struct PendingFreeBlock {
PendingFreeBlock *next;
};
extern int process_current_memory, process_max_memory;
// Memory pool based on B+ tree of free memory blocks
// We are going to have two target architectures:
// 1. Multi-process server with customizable lock manager
// 2. Multi-threaded server with single process (SUPERSERVER)
//
// MemoryPool inheritance looks weird because we cannot use
// any pointers to functions in shared memory. VMT usage in
// MemoryPool and its descendants is prohibited
class MemoryPool {
private:
class InternalAllocator {
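
The remark about VMTs above is the usual shared-memory constraint: a virtual call is dispatched through a hidden vtable pointer, and that pointer is a code address meaningful only inside the process that constructed the object. A small illustration of the hazard (hypothetical types, not the Firebird classes):

    // Hypothetical illustration of why virtual functions are banned here.
    struct PoolWithVtbl {
        virtual void release();   // adds a hidden vtable pointer to every instance
    };

    struct PoolNoVtbl {
        void release();           // plain member function: no per-object pointers
    };

    // If process A constructs a PoolWithVtbl inside a shared-memory segment,
    // its vtable pointer refers to A's address space; process B mapping the
    // same segment would chase that pointer into unrelated (or unmapped)
    // memory on the first virtual call. PoolNoVtbl carries no such pointer,
    // which is why MemoryPool and its descendants avoid virtual functions.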
@ -111,6 +117,8 @@ private:
#else
SharedSpinlock lock;
#endif
int extents_memory; // Sum of memory in allocated extents, minus the size of the extent headers
int used_memory; // Size of used memory blocks including block headers
/* Returns NULL in case it cannot allocate requested chunk */
static void* external_alloc(size_t size);
@ -127,6 +135,8 @@ private:
void removeFreeBlock(MemoryBlock *blk);
void free_blk_extent(MemoryBlock *blk);
// Does all the work except locking and exception handling
void* internal_alloc(size_t size, SSHORT type = 0
#ifdef DEBUG_GDS_ALLOC
@ -134,12 +144,18 @@ private:
#endif
);
protected:
int *cur_memory;
int *max_memory;
// Do not allow pools to be created or destroyed directly from outside
MemoryPool(void *first_extent, void *root_page) :
MemoryPool(void *first_extent, void *root_page, int* cur_mem = NULL, int* max_mem = NULL) :
freeBlocks((InternalAllocator*)this, root_page),
extents((MemoryExtent *)first_extent),
needSpare(false),
pendingFree(NULL)
pendingFree(NULL),
/*extents_memory(0), - Initialized in internal_create() */
used_memory(0),
cur_memory(cur_mem),
max_memory(max_mem)
{
}
@ -147,8 +163,16 @@ protected:
~MemoryPool() {
}
static MemoryPool* internal_create(size_t instance_size);
static MemoryPool* internal_create(size_t instance_size,
int *cur_mem = &process_current_memory, int *max_mem = &process_max_memory);
public:
// Move usage stats to another location
void moveStats(int *cur_mem, int *max_mem) {
*cur_mem = *cur_memory;
*max_mem = *max_memory;
cur_memory = cur_mem;
max_memory = max_mem;
}
static MemoryPool* createPool() {
return internal_create(sizeof(MemoryPool));
}
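
moveStats() exists for the bootstrap problem shown in the jrd.cpp hunk at the end of this commit: the permanent pool must be created before the dbb that will own its counters, so the stats are first gathered in a pair of local ints and then re-pointed at the dbb fields. A small usage sketch of that hand-over (StatsHolder is a placeholder modelling only the pool's stats pointers):

    #include <cstdio>

    struct StatsHolder {
        int *cur_memory;
        int *max_memory;

        // Same order of operations as MemoryPool::moveStats(): copy the running
        // totals into the new location first, then retarget the pointers, so no
        // updates are lost across the hand-over.
        void moveStats(int *cur_mem, int *max_mem) {
            *cur_mem = *cur_memory;
            *max_mem = *max_memory;
            cur_memory = cur_mem;
            max_memory = max_mem;
        }
    };

    int main()
    {
        int boot_cur = 3072, boot_max = 4096;   // totals collected during bootstrap
        int dbb_cur = 0, dbb_max = 0;           // their permanent home on the dbb
        StatsHolder pool = { &boot_cur, &boot_max };
        pool.moveStats(&dbb_cur, &dbb_max);     // dbb now reports 3072 / 4096
        std::printf("%d %d\n", dbb_cur, dbb_max);
    }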

View File

@ -312,7 +312,7 @@ void testAllocator() {
} while (bigItems.getNext());
printf(" DONE\n");
pool->verify_pool();
pool->print_pool(stdout,true);
pool->print_contents(stdout,true);
MemoryPool::deletePool(pool);
// TODO:
// Test critically low memory conditions

View File

@ -1,7 +1,7 @@
# Test for library integrity
# this should be compiled with optimization turned off and with NDEBUG undefined
ulimit -s unlimited
g++ -ggdb -Wall -I../../include -DDEBUG_GDS_ALLOC class_test.cpp alloc.cpp ../fb_exception.cpp 2> aa
g++ -ggdb -Wall -I../../include -DTESTING_ONLY -DDEBUG_GDS_ALLOC class_test.cpp alloc.cpp ../fb_exception.cpp 2> aa
./a.out
# Choose the best algorithm parameters for the target architecture

View File

@ -1,3 +1,6 @@
#ifndef YYSTYPE
#define YYSTYPE int
#endif
#define ACTIVE 257
#define ADD 258
#define AFTER 259
@ -254,3 +257,6 @@
#define INSERTING 510
#define UPDATING 511
#define DELETING 512
extern YYSTYPE yylval;

View File

@ -101,6 +101,31 @@ void ALL_check_memory()
#endif /* DEV_BUILD */
JrdMemoryPool *JrdMemoryPool::createPool(int *cur_mem, int *max_mem) {
JrdMemoryPool *result = (JrdMemoryPool *)internal_create(sizeof(JrdMemoryPool),
cur_mem, max_mem);
result->plb_buckets = NULL;
result->plb_segments = NULL;
result->plb_dccs = NULL;
new (&result->lls_cache) BlockCache<lls> (*result);
return result;
}
JrdMemoryPool *JrdMemoryPool::createPool() {
#ifdef SUPERSERVER
DBB dbb = GET_DBB;
JrdMemoryPool *result = (JrdMemoryPool *)internal_create(sizeof(JrdMemoryPool),
(int*)&dbb->dbb_current_memory, (int*)&dbb->dbb_max_memory);
#else
JrdMemoryPool *result = (JrdMemoryPool *)internal_create(sizeof(JrdMemoryPool));
#endif
result->plb_buckets = NULL;
result->plb_segments = NULL;
result->plb_dccs = NULL;
new (&result->lls_cache) BlockCache<lls> (*result);
return result;
}
TEXT* ALL_cstring(TEXT* in_string)
{
/**************************************

View File

@ -47,14 +47,8 @@ protected:
JrdMemoryPool() : MemoryPool(NULL, NULL), lls_cache(*this) {}
~JrdMemoryPool() {}
public:
static JrdMemoryPool *createPool() {
JrdMemoryPool *result = (JrdMemoryPool *)internal_create(sizeof(JrdMemoryPool));
result->plb_buckets = NULL;
result->plb_segments = NULL;
result->plb_dccs = NULL;
new (&result->lls_cache) BlockCache<lls> (*result);
return result;
}
static JrdMemoryPool *createPool(int *cur_mem, int *max_mem);
static JrdMemoryPool *createPool();
static void deletePool(JrdMemoryPool* pool) {
pool->lls_cache.~BlockCache<lls>();
MemoryPool::deletePool(pool);

View File

@ -383,6 +383,7 @@ int INF_database_info(
length = 0;
break;
#ifdef SUPERSERVER
case isc_info_current_memory:
length = INF_convert(dbb->dbb_current_memory, buffer);
break;
@ -390,6 +391,15 @@ int INF_database_info(
case isc_info_max_memory:
length = INF_convert(dbb->dbb_max_memory, buffer);
break;
#else
case isc_info_current_memory:
length = INF_convert(process_current_memory, buffer);
break;
case isc_info_max_memory:
length = INF_convert(process_max_memory, buffer);
break;
#endif
case isc_info_attachment_id:
length = INF_convert(PAG_attachment_id(), buffer);
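
With this hunk, isc_info_current_memory and isc_info_max_memory report the per-database counters under SUPERSERVER and the process-wide totals otherwise. A hedged client-side sketch of querying them through the classic info API (assumes the standard ibase.h header and an already attached isc_db_handle; error handling is trimmed):

    #include <ibase.h>
    #include <cstdio>

    void print_memory_info(isc_db_handle db)
    {
        ISC_STATUS_ARRAY status;
        char items[] = { isc_info_current_memory, isc_info_max_memory, isc_info_end };
        char buffer[64];

        if (isc_database_info(status, &db, (short) sizeof(items), items,
                              (short) sizeof(buffer), buffer))
            return;   // real code would inspect the status vector here

        // Result buffer layout: item byte, 2-byte little-endian length, value.
        for (char* p = buffer; *p != isc_info_end; ) {
            char item = *p++;
            short len = (short) isc_vax_integer(p, 2);
            p += 2;
            ISC_LONG value = isc_vax_integer(p, len);
            p += len;
            if (item == isc_info_current_memory)
                std::printf("current memory: %ld\n", (long) value);
            else if (item == isc_info_max_memory)
                std::printf("max memory:     %ld\n", (long) value);
        }
    }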

View File

@ -5621,9 +5621,15 @@ static DBB init(TDBB tdbb,
tdbb->tdbb_database = 0;
try {
#ifdef SUPERSERVER
int cur_perm = 0, max_perm = 0;
JrdMemoryPool* perm = JrdMemoryPool::createPool(&cur_perm, &max_perm);
dbb_ = dbb::newDbb(*perm);
perm->moveStats((int*)&dbb_->dbb_current_memory, (int*)&dbb_->dbb_max_memory);
#else
JrdMemoryPool* perm = JrdMemoryPool::createPool();
dbb_ = dbb::newDbb(*perm);
#endif
//temp.blk_type = type_dbb;
dbb_->dbb_permanent = perm;
dbb_->dbb_mutexes = temp_mutx;