/*
* PROGRAM: Client/Server Common Code
* MODULE: alloc.cpp
* DESCRIPTION: Memory Pool Manager (based on B+ tree)
*
* The contents of this file are subject to the Initial
* Developer's Public License Version 1.0 (the "License");
* you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
* http://www.ibphoenix.com/main.nfs?a=ibphoenix&page=ibp_idpl.
*
* Software distributed under the License is distributed AS IS,
* WITHOUT WARRANTY OF ANY KIND, either express or implied.
* See the License for the specific language governing rights
* and limitations under the License.
*
* The Original Code was created by Nickolay Samofatov
* for the Firebird Open Source RDBMS project.
*
* Copyright (c) 2004 Nickolay Samofatov <nickolay@broadviewsoftware.com>
* and all contributors signed below.
*
* All Rights Reserved.
*
* The Original Code was created by James A. Starkey for IBPhoenix.
*
* Copyright (c) 2004 James A. Starkey
* All Rights Reserved.
*
* Contributor(s):
*
* Alex Peshkoff <peshkoff@mail.ru>
* added PermanentStorage and AutoStorage classes.
* merged parts of Nickolay and Jim code to be used together
*
*/
// PLEASE, DO NOT CONSTIFY THIS MODULE !!!
#include "firebird.h"
#include "../common/common.h"
#include "../common/classes/alloc.h"
#ifndef WIN_NT
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/mman.h>
#endif
#include "../common/classes/fb_tls.h"
#include "../common/classes/locks.h"
#include "../common/classes/init.h"
#include "../common/classes/Interlock.h"
#include "../common/classes/vector.h"
#include "gen/iberror.h"
#ifdef USE_VALGRIND
#include <valgrind/memcheck.h>
#ifndef VALGRIND_MAKE_WRITABLE // Valgrind 3.3
#define VALGRIND_MAKE_WRITABLE VALGRIND_MAKE_MEM_UNDEFINED
#define VALGRIND_MAKE_NOACCESS VALGRIND_MAKE_MEM_NOACCESS
#endif
#define VALGRIND_FIX_IT // overrides suspicious valgrind behavior
#endif // USE_VALGRIND
namespace {
/*** emergency debugging stuff
static const char* lastFileName;
static int lastLine;
static void* lastBlock;
static void* stopAddress = (void*) 0x2254938;
***/
#ifdef MEM_DEBUG
static const int GUARD_BYTES = Firebird::ALLOC_ALIGNMENT; // * 2048;
static const UCHAR INIT_BYTE = 0xCC;
static const UCHAR GUARD_BYTE = 0xDD;
static const UCHAR DEL_BYTE = 0xEE;
#else
static const int GUARD_BYTES = 0;
#endif
template <typename T>
T absVal(T n) throw ()
{
return n < 0 ? -n : n;
}
#ifdef USE_VALGRIND
// Circular FIFO buffer of read/write protected extents pending free operation
// Race protected via cache_mutex.
struct DelayedExtent
{
void* memory; // Extent pointer
size_t size; // Size of extent
int handle; // Valgrind handle of protected extent block
};
DelayedExtent delayedExtents[DELAYED_EXTENT_COUNT];
size_t delayedExtentCount = 0;
size_t delayedExtentsPos = 0;
#endif
} // anonymous namespace
namespace Firebird {
// This is required for modules that do not define any GlobalPtr themselves
GlobalPtr<Mutex> forceCreationOfDefaultMemoryPool;
MemoryPool* MemoryPool::defaultMemoryManager = NULL;
MemoryStats* MemoryPool::default_stats_group = NULL;
Mutex* cache_mutex = NULL;
// Initialize process memory pool (called from InstanceControl).
void MemoryPool::init()
{
static char mtxBuffer[sizeof(Mutex) + ALLOC_ALIGNMENT];
cache_mutex = new((void*)(IPTR) MEM_ALIGN((size_t)(IPTR) mtxBuffer)) Mutex;
static char msBuffer[sizeof(MemoryStats) + ALLOC_ALIGNMENT];
default_stats_group =
new((void*)(IPTR) MEM_ALIGN((size_t)(IPTR) msBuffer)) MemoryStats;
static char mpBuffer[sizeof(MemoryPool) + ALLOC_ALIGNMENT];
defaultMemoryManager =
new((void*)(IPTR) MEM_ALIGN((size_t)(IPTR) mpBuffer)) MemoryPool;
}
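// Note: these three process-wide objects are built with placement new inside
// static, suitably aligned buffers, so no heap allocation is needed before the
// default pool exists; cleanup() later destroys them explicitly in reverse order.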
// Should be the last routine called by InstanceControl,
// being therefore the very last routine executed in the firebird module.
void MemoryPool::cleanup()
{
#ifdef VALGRIND_FIX_IT
VALGRIND_DISCARD(
VALGRIND_MAKE_MEM_DEFINED(cache_mutex, sizeof(Mutex)));
VALGRIND_DISCARD(
VALGRIND_MAKE_MEM_DEFINED(default_stats_group, sizeof(MemoryStats)));
VALGRIND_DISCARD(
VALGRIND_MAKE_MEM_DEFINED(defaultMemoryManager, sizeof(MemoryPool)));
#endif
if (defaultMemoryManager)
{
defaultMemoryManager->~MemoryPool();
defaultMemoryManager = NULL;
}
if (default_stats_group)
{
default_stats_group->~MemoryStats();
default_stats_group = NULL;
}
if (cache_mutex)
{
cache_mutex->~Mutex();
cache_mutex = NULL;
}
}
MemoryPool::MemoryPool(bool shared, int rounding, int cutoff, int minAlloc)
: roundingSize(rounding), threshold(cutoff), minAllocation(minAlloc),
threadShared(shared), pool_destroying(false), stats(default_stats_group), parent(NULL)
{
size_t vecSize = (cutoff + rounding) / rounding;
size_t l = vecSize * sizeof(void*);
init(allocRaw(l), l);
}
MemoryPool::MemoryPool(MemoryPool& p, MemoryStats& s, bool shared, int rounding, int cutoff, int minAlloc)
: roundingSize(rounding), threshold(cutoff), minAllocation(minAlloc),
threadShared(shared), pool_destroying(false), stats(&s), parent(&p)
{
size_t vecSize = (cutoff + rounding) / rounding;
size_t l = vecSize * sizeof(void*);
init(parent->allocate(l), l);
}
void MemoryPool::init(void* memory, size_t length)
{
freeObjects = (MemBlock**) memory;
memset(freeObjects, 0, length);
bigHunks = NULL;
smallHunks = NULL;
freeBlocks.nextLarger = freeBlocks.priorSmaller = &freeBlocks;
junk.nextLarger = junk.priorSmaller = &junk;
blocksAllocated = 0;
blocksActive = 0;
#ifdef USE_VALGRIND
delayedFreeCount = 0;
delayedFreePos = 0;
VALGRIND_CREATE_MEMPOOL(this, VALGRIND_REDZONE, 0);
#endif
}
MemoryPool::~MemoryPool(void)
{
pool_destroying = true;
decrement_usage(used_memory.value());
decrement_mapping(mapped_memory.value());
#ifdef USE_VALGRIND
VALGRIND_DESTROY_MEMPOOL(this);
// Do not forget to discard stack traces for delayed free blocks
for (size_t i = 0; i < delayedFreeCount; i++)
{
MemBlock* block = delayedFree[i];
void* object = &block->body;
VALGRIND_DISCARD(
VALGRIND_MAKE_MEM_DEFINED(block, OFFSET(MemBlock*, body)));
VALGRIND_DISCARD(
VALGRIND_MAKE_WRITABLE(object, block->length));
}
#endif
if (parent)
{
MemoryPool::release(freeObjects);
}
else
{
releaseRaw(freeObjects, ((threshold + roundingSize) / roundingSize) * sizeof(void*));
}
freeObjects = NULL;
for (MemSmallHunk* hunk; hunk = smallHunks;)
{
smallHunks = hunk->nextHunk;
releaseRaw (hunk, minAllocation);
}
for (MemBigHunk* hunk; hunk = bigHunks;)
{
bigHunks = hunk->nextHunk;
releaseRaw(hunk, hunk->length);
}
}
MemoryPool* MemoryPool::createPool(MemoryPool* parentPool, MemoryStats& stats)
{
if (!parentPool)
{
parentPool = getDefaultMemoryPool();
}
return FB_NEW(*parentPool) MemoryPool(*parentPool, stats);
}
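/*** Illustrative usage sketch (not part of the build; assumes the default
     arguments declared for createPool() in alloc.h):

	MemoryPool* pool = MemoryPool::createPool();	// child of the default pool
	void* p = pool->allocate(256);
	// ... use p ...
	pool->deallocate(p);
	MemoryPool::deletePool(pool);
***/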
void MemoryPool::setStatsGroup(MemoryStats& newStats) throw ()
{
MutexLockGuard guard(mutex, "MemoryPool::setStatsGroup");
const size_t sav_used_memory = used_memory.value();
const size_t sav_mapped_memory = mapped_memory.value();
stats->decrement_mapping(sav_mapped_memory);
stats->decrement_usage(sav_used_memory);
this->stats = &newStats;
stats->increment_mapping(sav_mapped_memory);
stats->increment_usage(sav_used_memory);
}
MemBlock* MemoryPool::alloc(const size_t length) throw (std::bad_alloc)
{
MutexLockGuard guard(mutex, "MemoryPool::alloc");
// If this is a small block, look for it there
if (length <= threshold)
{
unsigned int slot = length / roundingSize;
MemBlock* block;
if (threadShared)
{
while (block = freeObjects[slot])
{
if (COMPARE_EXCHANGE_POINTER(freeObjects + slot, block, (void*) block->pool))
{
#ifdef MEM_DEBUG
if (slot != block->length / roundingSize)
corrupt("length trashed for block in slot");
#endif
return block;
}
}
}
else
{
block = freeObjects[slot];
if (block)
{
freeObjects[slot] = (MemBlock*) block->pool;
#ifdef MEM_DEBUG
if (slot != block->length / roundingSize)
corrupt("length trashed for block in slot");
#endif
return block;
}
}
// See if some other hunk has unallocated space to use
MemSmallHunk* hunk;
for (hunk = smallHunks; hunk; hunk = hunk->nextHunk)
{
if (length <= hunk->spaceRemaining)
{
MemBlock *block = (MemBlock*) hunk->memory;
hunk->memory += length;
hunk->spaceRemaining -= length;
block->length = length;
return block;
}
}
// No good so far. Time for a new hunk
hunk = (MemSmallHunk*) allocRaw(minAllocation);
hunk->length = minAllocation - 16;
hunk->nextHunk = smallHunks;
smallHunks = hunk;
size_t l = ROUNDUP(sizeof(MemSmallHunk), sizeof(double));
block = (MemBlock*) ((UCHAR*) hunk + l);
hunk->spaceRemaining = minAllocation - length - l;
hunk->memory = (UCHAR*) block + length;
block->length = length;
return block;
}
/*
* OK, we've got a "big block" on our hands. To maximize confusion, the indicated
* length of a free big block is the length of MemHeader plus body, explicitly
* excluding the MemFreeBlock and MemBigHeader fields.

                   [MemHeader::length]
                <------- MemBlock ------->

 *--------------*-----------*-------------*
 | MemBigHeader | MemHeader |    Body     |
 *--------------*-----------*-------------*

  <------------ MemBigObject ------------>

 *--------------*-----------*--------------*
 | MemBigHeader | MemHeader | MemFreeBlock |
 *--------------*-----------*--------------*

  <------------- MemFreeBlock ------------>
*/
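// Illustrative accounting example (numbers assumed): a free big block with
// memHeader.length == 1000 occupies sizeof(MemBigHeader) + 1000 bytes of its hunk;
// after a 400-byte piece is split off below, the remaining free block gets
// memHeader.length = 600 - sizeof(MemBigObject).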
MemFreeBlock* freeBlock;
for (freeBlock = freeBlocks.nextLarger; freeBlock != &freeBlocks; freeBlock = freeBlock->nextLarger)
{
if (freeBlock->memHeader.length >= length)
{
remove(freeBlock);
MemBlock* block = (MemBlock*) &freeBlock->memHeader;
// Compute length (MemHeader + body) for new free block
unsigned int tail = block->length - length;
// If there isn't room to split off a new free block, allocate the whole thing
if (tail < sizeof(MemFreeBlock))
{
block->pool = this;
return block;
}
// Otherwise, chop up the block
MemBigObject* newBlock = freeBlock;
freeBlock = (MemFreeBlock*) ((UCHAR*) block + length);
freeBlock->memHeader.length = tail - sizeof(MemBigObject);
block->length = length;
block->pool = this;
if (freeBlock->next = newBlock->next)
freeBlock->next->prior = freeBlock;
newBlock->next = freeBlock;
freeBlock->prior = newBlock;
freeBlock->memHeader.pool = NULL; // indicate block is free
insert(freeBlock);
return block;
}
}
// Didn't find existing space -- allocate new hunk
size_t hunkLength = sizeof (MemBigHunk) + sizeof(MemBigHeader) + length;
size_t freeSpace = 0;
// If the hunk size is sufficiently below minAllocation, allocate extra space
if (hunkLength + sizeof(MemBigObject) + threshold < minAllocation)
{
hunkLength = minAllocation;
//freeSpace = hunkLength - 2 * sizeof(MemBigObject) - length;
freeSpace = hunkLength - sizeof(MemBigHunk) - 2 * sizeof(MemBigHeader) - length;
}
// Allocate the new hunk
MemBigHunk* hunk = (MemBigHunk*) allocRaw(hunkLength);
hunk->nextHunk = bigHunks;
bigHunks = hunk;
hunk->length = hunkLength;
// Create the new block
MemBigObject* newBlock = (MemBigObject*) &hunk->blocks;
newBlock->prior = NULL;
newBlock->next = NULL;
MemBlock* block = (MemBlock*) &newBlock->memHeader;
block->pool = this;
block->length = length;
// If there is space left over, create a free block
if (freeSpace)
{
freeBlock = (MemFreeBlock*) ((UCHAR*) block + length);
freeBlock->memHeader.length = freeSpace;
freeBlock->memHeader.pool = NULL;
freeBlock->next = NULL;
freeBlock->prior = newBlock;
newBlock->next = freeBlock;
insert(freeBlock);
}
return block;
}
void* MemoryPool::allocate(size_t size
#ifdef DEBUG_GDS_ALLOC
, const char* fileName, int line
#endif
) throw (std::bad_alloc)
{
size_t length = ROUNDUP(size + VALGRIND_REDZONE, roundingSize) + OFFSET(MemBlock*, body) + GUARD_BYTES;
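// The total block length is the caller's size plus the Valgrind red zone, rounded
// up to roundingSize, plus the MemBlock header and (in MEM_DEBUG builds) the guard
// bytes. Example with assumed values: roundingSize = 8, no Valgrind, no MEM_DEBUG ->
// a 20-byte request reserves ROUNDUP(20, 8) = 24 bytes of body plus the header.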
MemBlock* memory = alloc(length);
#ifdef USE_VALGRIND
VALGRIND_MEMPOOL_ALLOC(this, &memory->body, size);
#endif
memory->pool = this;
#ifdef DEBUG_GDS_ALLOC
memory->fileName = fileName;
memory->lineNumber = line;
memset (&memory->body, INIT_BYTE, size);
memset (&memory->body + size, GUARD_BYTE, memory->length - size - OFFSET(MemBlock*,body));
#endif
++blocksAllocated;
++blocksActive;
increment_usage(memory->length);
return &memory->body;
}
void MemoryPool::release(void* object) throw ()
{
if (object)
{
MemBlock* block = (MemBlock*) ((UCHAR*) object - OFFSET(MemBlock*, body));
MemoryPool* pool = block->pool;
#ifdef USE_VALGRIND
// Synchronize delayed free queue using pool mutex
MutexLockGuard guard (pool->mutex, "MemoryPool::deallocate USE_VALGRIND");
// Notify Valgrind that block is freed from the pool
VALGRIND_MEMPOOL_FREE(pool, object);
// block is placed in delayed buffer - mark as NOACCESS for that time
VALGRIND_DISCARD(VALGRIND_MAKE_NOACCESS(block, OFFSET(MemBlock*, body)));
// Extend circular buffer if possible
if (pool->delayedFreeCount < FB_NELEM(pool->delayedFree))
{
pool->delayedFree[pool->delayedFreeCount] = block;
pool->delayedFreeCount++;
return;
}
// Shift circular buffer pushing out oldest item
MemBlock* requested_block = block;
block = pool->delayedFree[pool->delayedFreePos];
object = &block->body;
// Re-enable access to MemBlock
VALGRIND_DISCARD(VALGRIND_MAKE_MEM_DEFINED(block, OFFSET(MemBlock*, body)));
// Remove protection from memory block
#ifdef VALGRIND_FIX_IT
VALGRIND_DISCARD(
VALGRIND_MAKE_MEM_DEFINED(object, block->length - VALGRIND_REDZONE));
#else
VALGRIND_DISCARD(
VALGRIND_MAKE_WRITABLE(object, block->length - VALGRIND_REDZONE));
#endif
// Replace element in circular buffer
pool->delayedFree[pool->delayedFreePos] = requested_block;
// Move queue pointer to next element and cycle if needed
if (++(pool->delayedFreePos) >= FB_NELEM(pool->delayedFree))
pool->delayedFreePos = 0;
#endif
size_t size = block->length;
#ifdef DEBUG_GDS_ALLOC
block->fileName = NULL;
#endif
pool->releaseBlock(block);
pool->decrement_usage(size);
}
}
void MemoryPool::releaseBlock(MemBlock *block) throw ()
{
if (!freeObjects)
return;
if (block->pool != this)
corrupt("bad block released");
#ifdef MEM_DEBUG
for (const UCHAR* end = (UCHAR*) block + block->length, *p = end - GUARD_BYTES; p < end;)
{
if (*p++ != GUARD_BYTE)
corrupt("guard bytes overwritten");
}
#endif
--blocksActive;
const size_t length = block->length;
// If length does not exceed the threshold, this is a small block
if (length <= threshold)
{
#ifdef MEM_DEBUG
memset (&block->body, DEL_BYTE, length - OFFSET(MemBlock*, body));
#endif
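// Small blocks go back onto the per-size free list; the block's pool field is
// reused as the "next" link. When the pool is shared between threads the push is
// done with a compare-and-swap loop, otherwise with a plain store.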
if (threadShared)
{
for (int slot = length / roundingSize;;)
{
void *next = freeObjects [slot];
block->pool = (MemoryPool*) next;
if (COMPARE_EXCHANGE_POINTER(freeObjects + slot, next, block))
return;
}
}
int slot = length / roundingSize;
void *next = freeObjects [slot];
block->pool = (MemoryPool*) next;
freeObjects[slot] = block;
return;
}
// OK, this is a large block. Try recombining with neighbors
MutexLockGuard guard(mutex, "MemoryPool::release");
#ifdef MEM_DEBUG
memset (&block->body, DEL_BYTE, length - OFFSET(MemBlock*, body));
#endif
MemFreeBlock *freeBlock = (MemFreeBlock*) ((UCHAR*) block - sizeof (MemBigHeader));
block->pool = NULL;
if (freeBlock->next && !freeBlock->next->memHeader.pool)
{
MemFreeBlock *next = (MemFreeBlock*) (freeBlock->next);
remove (next);
freeBlock->memHeader.length += next->memHeader.length + sizeof (MemBigHeader);
if (freeBlock->next = next->next)
freeBlock->next->prior = freeBlock;
}
if (freeBlock->prior && !freeBlock->prior->memHeader.pool)
{
MemFreeBlock *prior = (MemFreeBlock*) (freeBlock->prior);
remove (prior);
prior->memHeader.length += freeBlock->memHeader.length + sizeof (MemBigHeader);
if (prior->next = freeBlock->next)
prior->next->prior = prior;
freeBlock = prior;
}
// If the block has no neighbors, the entire hunk is empty and can be unlinked and
// released
if (freeBlock->prior == NULL && freeBlock->next == NULL)
{
for (MemBigHunk **ptr = &bigHunks, *hunk; hunk = *ptr; ptr = &hunk->nextHunk)
{
if (&hunk->blocks == freeBlock)
{
*ptr = hunk->nextHunk;
decrement_mapping(hunk->length);
releaseRaw(hunk, hunk->length);
return;
}
}
corrupt("can't find big hunk");
}
insert (freeBlock);
}
void MemoryPool::corrupt(const char* text) throw ()
{
#ifdef DEV_BUILD
fprintf(stderr, "%s\n", text);
abort();
#endif
}
void MemoryPool::memoryIsExhausted(void) throw (std::bad_alloc)
{
Firebird::BadAlloc::raise();
}
void MemoryPool::remove(MemFreeBlock* block) throw ()
{
// If this is junk, chop it out and be done with it
if (block->memHeader.length < threshold)
return;
// If we're a twin, take out of the twin list
if (!block->nextLarger)
{
block->nextTwin->priorTwin = block->priorTwin;
block->priorTwin->nextTwin = block->nextTwin;
return;
}
// We're in the primary list. If we have a twin, move him in
MemFreeBlock *twin = block->nextTwin;
if (twin != block)
{
block->priorTwin->nextTwin = twin;
twin->priorTwin = block->priorTwin;
twin->priorSmaller = block->priorSmaller;
twin->nextLarger = block->nextLarger;
twin->priorSmaller->nextLarger = twin;
twin->nextLarger->priorSmaller = twin;
return;
}
// No twins. Just take the guy out of the list
block->priorSmaller->nextLarger = block->nextLarger;
block->nextLarger->priorSmaller = block->priorSmaller;
}
void MemoryPool::insert(MemFreeBlock* freeBlock) throw ()
{
// If this is junk (too small for the free list), don't bother tracking it
if (freeBlock->memHeader.length < threshold)
return;
// Start by finding insertion point
MemFreeBlock *block;
for (block = freeBlocks.nextLarger;
block != &freeBlocks && freeBlock->memHeader.length >= block->memHeader.length;
block = block->nextLarger)
{
if (block->memHeader.length == freeBlock->memHeader.length)
{
// If this is a "twin" (same size block), hang off block
freeBlock->nextTwin = block->nextTwin;
freeBlock->nextTwin->priorTwin = freeBlock;
freeBlock->priorTwin = block;
block->nextTwin = freeBlock;
freeBlock->nextLarger = NULL;
return;
}
}
// OK, then, link in after insertion point
freeBlock->nextLarger = block;
freeBlock->priorSmaller = block->priorSmaller;
block->priorSmaller->nextLarger = freeBlock;
block->priorSmaller = freeBlock;
freeBlock->nextTwin = freeBlock->priorTwin = freeBlock;
}
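/*** Illustrative picture of the free list maintained by insert()/remove() (sizes
     assumed): the primary list is ordered by ascending size and equal-sized blocks
     hang off the first one as a ring of "twins". Free blocks of 96, 96 and 160
     bytes would give:

	freeBlocks <-> [96] <-> [160] <-> freeBlocks	(nextLarger / priorSmaller)
	                |
	              [96']				(nextTwin / priorTwin ring)
***/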
namespace {
// We cache this many extents to avoid memory mapping overhead
const int MAP_CACHE_SIZE = 16; // == 1 MB
Vector<void*, MAP_CACHE_SIZE> extents_cache;
volatile size_t map_page_size = 0;
int dev_zero_fd = -1;	// must start negative so that /dev/zero is opened on first use
#if defined(WIN_NT)
size_t get_page_size()
{
2009-11-03 14:30:37 +01:00
SYSTEM_INFO info;
GetSystemInfo(&info);
return info.dwPageSize;
}
#else
size_t get_page_size()
{
return sysconf(_SC_PAGESIZE);
}
#endif
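// get_map_page_size() below uses double-checked locking: map_page_size is read
// without the mutex first and computed only once under cache_mutex.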
2008-02-20 16:25:20 +01:00
inline size_t get_map_page_size()
{
if (! map_page_size)
{
MutexLockGuard guard(*cache_mutex);
if (! map_page_size)
map_page_size = get_page_size();
}
return map_page_size;
}
}
void* MemoryPool::allocRaw(size_t size) throw (std::bad_alloc)
{
#ifndef USE_VALGRIND
if (size == DEFAULT_ALLOCATION)
{
MutexLockGuard guard(*cache_mutex);
void* result = NULL;
if (extents_cache.getCount()) {
// Use most recently used object to encourage caching
result = extents_cache[extents_cache.getCount() - 1];
extents_cache.shrink(extents_cache.getCount() - 1);
}
if (result) {
return result;
}
}
#endif
size = FB_ALIGN(size, get_map_page_size());
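// Example with assumed values: with a 4096-byte page, a 70000-byte request is
// rounded up to 73728 bytes before being mapped.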
#ifdef WIN_NT
void* result = VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE);
if (!result)
#else // WIN_NT
#ifdef MAP_ANONYMOUS
#ifdef SOLARIS
#define FB_MMAP_FLAGS MAP_PRIVATE | MAP_ANON
#else // SOLARIS
#define FB_MMAP_FLAGS MAP_PRIVATE | MAP_ANONYMOUS
#endif // SOLARIS
void* result = mmap(NULL, size, PROT_READ | PROT_WRITE, FB_MMAP_FLAGS, -1, 0);
#else // MAP_ANONYMOUS
if (dev_zero_fd < 0)
dev_zero_fd = open("/dev/zero", O_RDWR);
void* result = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, dev_zero_fd, 0);
#endif // MAP_ANONYMOUS
if (result == MAP_FAILED)
#endif // WIN_NT
{
// failure happens!
memoryIsExhausted();
return NULL;
}
#ifdef USE_VALGRIND
// Let Valgrind forget that block was zero-initialized
VALGRIND_DISCARD(VALGRIND_MAKE_WRITABLE(result, size));
#endif
increment_mapping(size);
return result;
}
void MemoryPool::validateFreeList(void) throw ()
{
size_t len = 0;
int count = 0;
MemFreeBlock *block;
for (block = freeBlocks.nextLarger; block != &freeBlocks; block = block->nextLarger)
{
if (block->memHeader.length <= len)
corrupt ("bad free list\n");
len = block->memHeader.length;
++count;
}
len += 1;
for (block = freeBlocks.priorSmaller; block != &freeBlocks; block = block->priorSmaller)
{
if (block->memHeader.length >= len)
corrupt ("bad free list\n");
len = block->memHeader.length;
}
}
void MemoryPool::validateBigBlock(MemBigObject* block) throw ()
{
MemBigObject *neighbor;
2009-11-04 01:20:56 +01:00
if (neighbor = block->prior)
{
if ((UCHAR*) &neighbor->memHeader + neighbor->memHeader.length != (UCHAR*) block)
corrupt ("bad neighbors");
}
if (neighbor = block->next)
{
if ((UCHAR*) &block->memHeader + block->memHeader.length != (UCHAR*) neighbor)
corrupt ("bad neighbors");
}
}
void MemoryPool::releaseRaw(void *block, size_t size) throw ()
{
#ifndef USE_VALGRIND
if (size == DEFAULT_ALLOCATION)
{
MutexLockGuard guard(*cache_mutex);
if (extents_cache.getCount() < extents_cache.getCapacity())
{
extents_cache.add(block);
return;
}
}
#else
// Set access protection for block to prevent memory from deleted pool being accessed
int handle = /* //VALGRIND_MAKE_NOACCESS */ VALGRIND_MAKE_MEM_DEFINED(block, size);
size = FB_ALIGN(size, get_map_page_size());
void* unmapBlockPtr = block;
size_t unmapBlockSize = size;
// Employ the extents delayed-free logic only when the pool is being destroyed.
// In the normal case all blocks pass through a queue of sufficient length by themselves
if (pool_destroying)
{
// Synchronize delayed free queue using extents mutex
MutexLockGuard guard(*cache_mutex);
// Extend circular buffer if possible
if (delayedExtentCount < FB_NELEM(delayedExtents))
{
DelayedExtent* item = &delayedExtents[delayedExtentCount];
item->memory = block;
item->size = size;
item->handle = handle;
delayedExtentCount++;
return;
}
DelayedExtent* item = &delayedExtents[delayedExtentsPos];
// Free message associated with old extent in Valgrind
VALGRIND_DISCARD(item->handle);
// Set up the block we are going to unmap
unmapBlockPtr = item->memory;
unmapBlockSize = item->size;
// Replace element in circular buffer
item->memory = block;
item->handle = handle;
item->size = size;
// Move queue pointer to next element and cycle if needed
delayedExtentsPos++;
if (delayedExtentsPos >= FB_NELEM(delayedExtents))
delayedExtentsPos = 0;
}
else
{
// Let Valgrind forget about unmapped block
VALGRIND_DISCARD(handle);
}
#endif
size = FB_ALIGN(size, get_map_page_size());
#ifdef WIN_NT
if (!VirtualFree(block, 0, MEM_RELEASE))
#else // WIN_NT
#if (defined SOLARIS) && (defined HAVE_CADDR_T)
if (munmap((caddr_t) block, size))
#else
if (munmap(block, size))
#endif
#endif // WIN_NT
corrupt ("OS memory deallocation error");
}
void MemoryPool::globalFree(void* block) throw ()
{
deallocate(block);
}
void* MemoryPool::calloc(size_t size
#ifdef DEBUG_GDS_ALLOC
, const char* fileName, int line
#endif
) throw (std::bad_alloc)
{
void *block = allocate((int) size
#ifdef DEBUG_GDS_ALLOC
, fileName, line
#endif
);
memset (block, 0, size);
return block;
}
void MemoryPool::deallocate(void* block) throw ()
{
release(block);
}
void MemoryPool::deletePool(MemoryPool* pool)
{
delete pool;
}
void MemoryPool::validate(void) throw ()
{
unsigned int slot = 3;
for (const MemBlock *block = freeObjects [slot]; block; block = (MemBlock*) block->pool)
{
if (slot != block->length / roundingSize)
corrupt ("length trashed for block in slot");
}
}
void MemoryPool::print_contents(const char* filename, bool used_only, const char* filter_path) throw ()
{
FILE* out = fopen(filename, "w");
if (!out)
return;
print_contents(out, used_only, filter_path);
fclose(out);
}
#ifdef MEM_DEBUG
static void print_block(bool used, FILE* file, MemHeader* blk, bool used_only,
const char* filter_path, const size_t filter_len) throw ()
{
if (used || !used_only)
{
bool filter = filter_path != NULL;
if (used && filter && blk->fileName)
filter = strncmp(filter_path, blk->fileName, filter_len) != 0;
if (!filter)
{
if (used)
{
fprintf(file, "USED %p: size=%" SIZEFORMAT " allocated at %s:%d\n",
blk, blk->length, blk->fileName, blk->lineNumber);
}
else
fprintf(file, "FREE %p: size=%" SIZEFORMAT "\n", blk, blk->length);
}
}
}
#endif
// This member function can't be const because there are calls to the mutex.
void MemoryPool::print_contents(FILE* file, bool used_only, const char* filter_path) throw ()
{
#ifdef MEM_DEBUG
MutexLockGuard guard(mutex, "MemoryPool::print_contents");
fprintf(file, "********* Printing contents of pool %p used=%ld mapped=%ld\n",
this, (long)used_memory.value(), (long)mapped_memory.value());
if (!used_only)
{
filter_path = NULL;
}
const size_t filter_len = filter_path ? strlen(filter_path) : 0;
// Print small hunks
for (MemSmallHunk* hunk = smallHunks; hunk; hunk = hunk->nextHunk)
{
int l = ROUNDUP(sizeof (MemSmallHunk), sizeof (double));
UCHAR* ptr = ((UCHAR*) hunk) + l;
size_t used = hunk->length - hunk->spaceRemaining;
fprintf(file, "\nSmall hunk %p size=%ld used=%ld remain=%ld\n",
hunk, hunk->length, used, hunk->spaceRemaining);
while (ptr < hunk->memory)
{
MemHeader* m = (MemHeader*)ptr;
print_block(m->fileName != NULL, file, m, used_only, filter_path, filter_len);
ptr += m->length;
}
}
// Print big hunks
for (MemBigHunk* hunk = bigHunks; hunk; hunk = hunk->nextHunk)
{
fprintf(file, "\nBig hunk %p size=%ld\n", hunk, hunk->length);
for (MemBigObject* block = (MemBigObject*) &hunk->blocks; block; block = block->next)
{
print_block(block->memHeader.pool != NULL, file, &block->memHeader, used_only, filter_path, filter_len);
}
}
#endif
}
// Declare thread-specific variable for context memory pool
TLS_DECLARE(MemoryPool*, contextPool);
MemoryPool* MemoryPool::setContextPool(MemoryPool* newPool)
{
MemoryPool* const old = TLS_GET(contextPool);
TLS_SET(contextPool, newPool);
return old;
}
MemoryPool* MemoryPool::getContextPool()
{
return TLS_GET(contextPool);
}
MemoryPool& AutoStorage::getAutoMemoryPool()
{
MemoryPool* p = MemoryPool::getContextPool();
if (! p)
{
p = getDefaultMemoryPool();
}
fb_assert(p);
return *p;
}
#ifdef LIBC_CALLS_NEW
void* MemoryPool::globalAlloc(size_t s) THROW_BAD_ALLOC
{
if (!processMemoryPool)
{
// this will do all required init, including processMemoryPool creation
static Firebird::InstanceControl dummy;
fb_assert(processMemoryPool);
}
return processMemoryPool->allocate(s
#ifdef DEBUG_GDS_ALLOC
,__FILE__, __LINE__
#endif
);
}
#endif // LIBC_CALLS_NEW
#if defined(DEV_BUILD)
void AutoStorage::ProbeStack() const
{
//
// AutoStorage() default constructor can be used only
// for objects on the stack. ProbeStack() uses the
// following assumptions to check it:
// 1. One and only one stack is used for all kind of variables.
// 2. Objects don't grow > 128K.
//
char probeVar = '\0';
const char* myStack = &probeVar;
const char* thisLocation = (const char*) this;
ptrdiff_t distance = thisLocation - myStack;
fb_assert(absVal(distance) < 128 * 1024);
}
#endif
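/*** Illustrative sketch of the contract checked above (hypothetical class, not
     part of the build):

	class LocalBuffer : public AutoStorage
	{
		// default constructor => instances must live on the stack;
		// ProbeStack() asserts this in DEV_BUILD
	};

	void f()
	{
		LocalBuffer onStack;	// fine: stack object, uses the context (or default) pool
		// A pool-allocated instance must be given its pool explicitly via the
		// AutoStorage(MemoryPool&) constructor instead.
	}
***/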
} // namespace Firebird