2002-12-14 22:43:18 +01:00
|
|
|
/*
|
|
|
|
* PROGRAM: Client/Server Common Code
|
|
|
|
* MODULE: alloc.cpp
|
|
|
|
* DESCRIPTION: Memory Pool Manager (based on B+ tree)
|
|
|
|
*
|
2004-06-30 03:26:40 +02:00
|
|
|
* The contents of this file are subject to the Initial
|
|
|
|
* Developer's Public License Version 1.0 (the "License");
|
|
|
|
* you may not use this file except in compliance with the
|
|
|
|
* License. You may obtain a copy of the License at
|
|
|
|
* http://www.ibphoenix.com/main.nfs?a=ibphoenix&page=ibp_idpl.
|
2002-12-14 22:43:18 +01:00
|
|
|
*
|
2004-06-30 03:26:40 +02:00
|
|
|
* Software distributed under the License is distributed AS IS,
|
|
|
|
* WITHOUT WARRANTY OF ANY KIND, either express or implied.
|
|
|
|
* See the License for the specific language governing rights
|
|
|
|
* and limitations under the License.
|
2002-12-14 22:43:18 +01:00
|
|
|
*
|
2004-06-30 03:26:40 +02:00
|
|
|
* The Original Code was created by Nickolay Samofatov
|
|
|
|
* for the Firebird Open Source RDBMS project.
|
2002-12-14 22:43:18 +01:00
|
|
|
*
|
2004-06-30 03:26:40 +02:00
|
|
|
* Copyright (c) 2004 Nickolay Samofatov <nickolay@broadviewsoftware.com>
|
|
|
|
* and all contributors signed below.
|
|
|
|
*
|
|
|
|
* All Rights Reserved.
|
|
|
|
*
|
2009-11-03 14:30:37 +01:00
|
|
|
* The Original Code was created by James A. Starkey for IBPhoenix.
|
|
|
|
*
|
|
|
|
* Copyright (c) 2004 James A. Starkey
|
|
|
|
* All Rights Reserved.
|
|
|
|
*
|
|
|
|
* Contributor(s):
|
|
|
|
*
|
|
|
|
* Alex Peshkoff <peshkoff@mail.ru>
|
|
|
|
* added PermanentStorage and AutoStorage classes.
|
|
|
|
* merged parts of Nickolay and Jim code to be used together
|
2002-12-14 22:43:18 +01:00
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
2009-04-17 16:10:11 +02:00
|
|
|
// PLEASE, DO NOT CONSTIFY THIS MODULE !!!
|
2004-08-10 06:10:47 +02:00
|
|
|
|
2004-03-26 00:12:50 +01:00
|
|
|
#include "firebird.h"
|
2010-10-13 12:39:52 +02:00
|
|
|
#include "../common/common.h"
|
2004-03-26 00:12:50 +01:00
|
|
|
#include "../common/classes/alloc.h"
|
2009-11-03 14:30:37 +01:00
|
|
|
|
|
|
|
#ifndef WIN_NT
|
2004-03-26 00:12:50 +01:00
|
|
|
#include <unistd.h>
|
2004-03-02 09:43:33 +01:00
|
|
|
#include <sys/types.h>
|
|
|
|
#include <sys/stat.h>
|
|
|
|
#include <fcntl.h>
|
2004-03-01 04:18:42 +01:00
|
|
|
#include <sys/mman.h>
|
|
|
|
#endif
|
2002-12-14 22:43:18 +01:00
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
#include "../common/classes/fb_tls.h"
|
|
|
|
#include "../common/classes/locks.h"
|
|
|
|
#include "../common/classes/init.h"
|
|
|
|
#include "../common/classes/vector.h"
|
|
|
|
#include "gen/iberror.h"
|
|
|
|
|
2004-07-31 00:38:08 +02:00
|
|
|
#ifdef USE_VALGRIND
|
2004-09-01 11:36:44 +02:00
|
|
|
#include <valgrind/memcheck.h>
|
2008-04-27 20:53:33 +02:00
|
|
|
|
|
|
|
#ifndef VALGRIND_MAKE_WRITABLE // Valgrind 3.3
|
|
|
|
#define VALGRIND_MAKE_WRITABLE VALGRIND_MAKE_MEM_UNDEFINED
|
|
|
|
#define VALGRIND_MAKE_NOACCESS VALGRIND_MAKE_MEM_NOACCESS
|
2004-07-31 00:38:08 +02:00
|
|
|
#endif
|
2002-12-16 19:33:54 +01:00
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
#define VALGRIND_FIX_IT // overrides suspicious valgrind behavior
|
|
|
|
#endif // USE_VALGRIND
|
2004-03-26 00:12:50 +01:00
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
/*** emergency debugging stuff
|
2009-11-04 01:20:56 +01:00
|
|
|
static const char* lastFileName;
|
|
|
|
static int lastLine;
|
|
|
|
static void* lastBlock;
|
|
|
|
static void* stopAddress = (void*) 0x2254938;
|
2009-11-03 14:30:37 +01:00
|
|
|
***/
|
|
|
|
|
|
|
|
#ifdef MEM_DEBUG
|
2009-12-29 13:46:53 +01:00
|
|
|
static const int GUARD_BYTES = Firebird::ALLOC_ALIGNMENT; // * 2048;
|
|
|
|
static const UCHAR INIT_BYTE = 0xCC;
|
|
|
|
static const UCHAR GUARD_BYTE = 0xDD;
|
|
|
|
static const UCHAR DEL_BYTE = 0xEE;
|
2009-11-03 14:30:37 +01:00
|
|
|
#else
|
2009-11-04 01:20:56 +01:00
|
|
|
static const int GUARD_BYTES = 0;
|
2004-03-26 00:12:50 +01:00
|
|
|
#endif
|
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
// Absolute value for any signed numeric type; avoids pulling in abs()
// overload headaches for the mixed integer/float uses in this module.
template <typename T>
T absVal(T n) throw ()
{
	if (n < 0)
		return -n;

	return n;
}
|
2004-03-26 00:12:50 +01:00
|
|
|
|
2004-08-09 03:24:54 +02:00
|
|
|
#ifdef USE_VALGRIND
|
|
|
|
// Circular FIFO buffer of read/write protected extents pending free operation.
// Race protected via cache_mutex.
//
// Each entry quarantines one freed extent under Valgrind protection so that
// late accesses to released memory are reported before the extent is really
// returned to the OS.
struct DelayedExtent
{
	void* memory; // Extent pointer
	size_t size;  // Size of extent
	int handle;   // Valgrind handle of protected extent block
};
|
|
|
|
|
|
|
|
DelayedExtent delayedExtents[DELAYED_EXTENT_COUNT];
|
|
|
|
size_t delayedExtentCount = 0;
|
|
|
|
size_t delayedExtentsPos = 0;
|
|
|
|
#endif
|
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
} // anonymous namespace
|
2004-03-26 00:12:50 +01:00
|
|
|
|
|
|
|
namespace Firebird {
|
|
|
|
|
2011-03-02 14:42:56 +01:00
|
|
|
// This is required for modules that do not define any GlobalPtr themself
GlobalPtr<Mutex> forceCreationOfDefaultMemoryPool;

// Process-wide singletons. They are constructed by MemoryPool::init() via
// placement new into static buffers and destroyed by MemoryPool::cleanup();
// until then they stay NULL.
MemoryPool* MemoryPool::defaultMemoryManager = NULL;
MemoryStats* MemoryPool::default_stats_group = NULL;
Mutex* cache_mutex = NULL;
|
2008-01-23 16:52:40 +01:00
|
|
|
|
|
|
|
// Initialize process memory pool (called from InstanceControl).
//
// The global mutex, the default stats group and the default pool are built
// by placement new into static, over-sized buffers that are aligned by hand:
// no pool exists yet, so nothing may be heap-allocated here, and the objects
// must stay alive until cleanup() runs their destructors explicitly.
void MemoryPool::init()
{
	// Align the raw static buffer and construct the global cache mutex in it
	static char mtxBuffer[sizeof(Mutex) + ALLOC_ALIGNMENT];
	cache_mutex = new((void*)(IPTR) MEM_ALIGN((size_t)(IPTR) mtxBuffer)) Mutex;

	// Stats group used by pools that were not given an explicit one
	static char msBuffer[sizeof(MemoryStats) + ALLOC_ALIGNMENT];
	default_stats_group =
		new((void*)(IPTR) MEM_ALIGN((size_t)(IPTR) msBuffer)) MemoryStats;

	// The process-wide default pool itself
	static char mpBuffer[sizeof(MemoryPool) + ALLOC_ALIGNMENT];
	defaultMemoryManager =
		new((void*)(IPTR) MEM_ALIGN((size_t)(IPTR) mpBuffer)) MemoryPool;
}
|
|
|
|
|
|
|
|
// Should be last routine, called by InstanceControl,
// being therefore the very last routine in firebird module.
//
// Destroys, in reverse order of creation, the three singletons built by
// init(). The objects live in static buffers (placement new), so only the
// destructors are invoked - there is no operator delete here.
void MemoryPool::cleanup()
{
#ifdef VALGRIND_FIX_IT
	// Make the singleton storage addressable again for Valgrind before
	// running destructors over it
	VALGRIND_DISCARD(
		VALGRIND_MAKE_MEM_DEFINED(cache_mutex, sizeof(Mutex)));
	VALGRIND_DISCARD(
		VALGRIND_MAKE_MEM_DEFINED(default_stats_group, sizeof(MemoryStats)));
	VALGRIND_DISCARD(
		VALGRIND_MAKE_MEM_DEFINED(defaultMemoryManager, sizeof(MemoryPool)));
#endif

	if (defaultMemoryManager)
	{
		defaultMemoryManager->~MemoryPool();
		defaultMemoryManager = NULL;
	}

	if (default_stats_group)
	{
		default_stats_group->~MemoryStats();
		default_stats_group = NULL;
	}

	if (cache_mutex)
	{
		cache_mutex->~Mutex();
		cache_mutex = NULL;
	}
}
|
2004-03-26 00:12:50 +01:00
|
|
|
|
2010-10-27 11:49:16 +02:00
|
|
|
// Construct a top-level pool (no parent). The per-size free-list vector is
// taken straight from the OS via allocRaw() because there is no parent pool
// to allocate it from.
MemoryPool::MemoryPool(bool shared, int rounding, int cutoff, int minAlloc)
	: roundingSize(rounding), threshold(cutoff), minAllocation(minAlloc),
	  threadShared(shared), pool_destroying(false), stats(default_stats_group), parent(NULL)
{
	// One free-list slot per rounded size up to the small/large cutoff
	size_t vecSize = (cutoff + rounding) / rounding;
	init(allocRaw(vecSize * sizeof(void*)), vecSize);
}
|
|
|
|
|
|
|
|
// Construct a child pool: accounting goes to the supplied stats group and
// the free-list vector is allocated from the parent pool (the destructor
// releases it back through the parent - see ~MemoryPool()).
MemoryPool::MemoryPool(MemoryPool& p, MemoryStats& s, bool shared, int rounding, int cutoff, int minAlloc)
	: roundingSize(rounding), threshold(cutoff), minAllocation(minAlloc),
	  threadShared(shared), pool_destroying(false), stats(&s), parent(&p)
{
	// One free-list slot per rounded size up to the small/large cutoff
	size_t vecSize = (cutoff + rounding) / rounding;
	init(parent->allocate(vecSize * sizeof(void*)), vecSize);
}
|
|
|
|
|
|
|
|
// Second-phase initialization shared by both constructors: 'memory' is raw
// storage for the per-size free-list vector of 'length' slots.
void MemoryPool::init(void* memory, size_t length)
{
	// hvlad: we not used placement new[] there as :
	// a) by standard placement new[] could add some (unknown!) overhead and use
	//    part of allocated memory for own use. For example MSVC reserved first array
	//    slot and stored items number in it returning advanced pointer. In our case
	//    it results in that freeObjects != memory and AV when freeObjects's memory is
	//    deallocated as freeObjects don't points to a parent pool anymore.
	// b) constructor of AtomicPointer does nothing except of zero'ing memory, plain
	//    memset will do it much faster. destructor of AtomicPointer is empty and we
	//    don't need to call it. This behavior is unlikely to be changed.
	//
	// While we can workaround (a) storing memory to release it correctly later,
	// we can't predict in portable way how much overhead is necessary to allocate
	// memory correctly.

	freeObjects = (FreeChainPtr*) memory;
	memset(freeObjects, 0, length * sizeof(void*));

	// No hunks yet; the large-block free lists are circular, so an empty
	// list points at its own head.
	bigHunks = NULL;
	smallHunks = NULL;
	freeBlocks.nextLarger = freeBlocks.priorSmaller = &freeBlocks;
	junk.nextLarger = junk.priorSmaller = &junk;
	blocksAllocated = 0;
	blocksActive = 0;

#ifdef USE_VALGRIND
	delayedFreeCount = 0;
	delayedFreePos = 0;

	// Register this pool with Valgrind's mempool machinery
	VALGRIND_CREATE_MEMPOOL(this, VALGRIND_REDZONE, 0);
#endif
}
|
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
// Tear down the pool: undo stats accounting, drain the Valgrind delayed-free
// queue, release the free-list vector and return every hunk to the OS.
MemoryPool::~MemoryPool(void)
{
	pool_destroying = true;

	// Back out this pool's contribution to its stats group
	decrement_usage(used_memory.value());
	decrement_mapping(mapped_memory.value());

#ifdef USE_VALGRIND
	VALGRIND_DESTROY_MEMPOOL(this);

	// Do not forget to discard stack traces for delayed free blocks
	for (size_t i = 0; i < delayedFreeCount; i++)
	{
		MemBlock* block = delayedFree[i];
		void* object = &block->body;

		VALGRIND_DISCARD(
			VALGRIND_MAKE_MEM_DEFINED(block, OFFSET(MemBlock*, body)));
		VALGRIND_DISCARD(
			VALGRIND_MAKE_WRITABLE(object, block->length));
	}
#endif

	// The free-list vector was allocated from the parent pool (child pool)
	// or straight from the OS (top-level pool) - release it accordingly
	if (parent)
	{
		MemoryPool::release(freeObjects);
	}
	else
	{
		releaseRaw(freeObjects, ((threshold + roundingSize) / roundingSize) * sizeof(void*));
	}
	freeObjects = NULL;

	// Return all hunks to the OS.
	// NB: assignment inside the loop condition is intentional.
	for (MemSmallHunk* hunk; hunk = smallHunks;)
	{
		smallHunks = hunk->nextHunk;
		releaseRaw (hunk, minAllocation);
	}

	for (MemBigHunk* hunk; hunk = bigHunks;)
	{
		bigHunks = hunk->nextHunk;
		releaseRaw(hunk, hunk->length);
	}
}
|
2006-04-05 08:25:47 +02:00
|
|
|
|
2010-10-27 11:49:16 +02:00
|
|
|
// Create a child pool allocated from (and parented by) parentPool.
// A null parent means "hang the new pool off the process default pool".
MemoryPool* MemoryPool::createPool(MemoryPool* parentPool, MemoryStats& stats)
{
	MemoryPool* const par = parentPool ? parentPool : getDefaultMemoryPool();

	return FB_NEW(*par) MemoryPool(*par, stats);
}
|
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
// Re-home this pool's accounting into newStats: the pool's current usage
// and mapping figures are subtracted from the old stats group and added to
// the new one, all under the pool mutex so both groups stay consistent.
void MemoryPool::setStatsGroup(MemoryStats& newStats) throw ()
{
	MutexLockGuard guard(mutex, "MemoryPool::setStatsGroup");

	// Snapshot current figures once - they are moved, not recomputed
	const size_t currentUsage = used_memory.value();
	const size_t currentMapping = mapped_memory.value();

	stats->decrement_mapping(currentMapping);
	stats->decrement_usage(currentUsage);

	stats = &newStats;

	stats->increment_mapping(currentMapping);
	stats->increment_usage(currentUsage);
}
|
2008-12-05 02:20:14 +01:00
|
|
|
|
2009-11-04 12:37:10 +01:00
|
|
|
// Core allocator: return a MemBlock whose total size is 'length' bytes
// (header included - callers round up and add overhead, see allocate()).
// Small requests (<= threshold) come from per-size free lists or small
// hunks; large requests come from the best-fit large free list or a new
// big hunk.
MemBlock* MemoryPool::alloc(const size_t length) throw (std::bad_alloc)
{
	MutexLockGuard guard(mutex, "MemoryPool::alloc");

	// If this is a small block, look for it there

	if (length <= threshold)
	{
		unsigned int slot = length / roundingSize;
		MemBlock* block;

		if (threadShared)
		{
			// Lock-free pop from the per-slot free list: retry the CAS
			// until the head is swapped out from under no one
			while (block = freeObjects[slot])
			{
				if (freeObjects[slot].compareExchange(block, block->next))
				{
#ifdef MEM_DEBUG
					if (slot != block->length / roundingSize)
						corrupt("length trashed for block in slot");
#endif
					return block;
				}
			}
		}
		else
		{
			// Single-threaded pool: the free chain is linked through the
			// (otherwise unused) pool field of free blocks
			block = freeObjects[slot];

			if (block)
			{
				freeObjects[slot] = (MemBlock*) block->pool;

#ifdef MEM_DEBUG
				if (slot != block->length / roundingSize)
					corrupt("length trashed for block in slot");
#endif
				return block;
			}
		}

		// See if some other hunk has unallocated space to use

		MemSmallHunk* hunk;

		for (hunk = smallHunks; hunk; hunk = hunk->nextHunk)
		{
			if (length <= hunk->spaceRemaining)
			{
				// Carve the block off the front of the hunk's free area
				MemBlock *block = (MemBlock*) hunk->memory;
				hunk->memory += length;
				hunk->spaceRemaining -= length;
				block->length = length;

				return block;
			}
		}

		// No good so far.  Time for a new hunk

		hunk = (MemSmallHunk*) allocRaw(minAllocation);
		hunk->length = minAllocation - 16;	// 16 bytes reserved - presumably allocator overhead; confirm
		hunk->nextHunk = smallHunks;
		smallHunks = hunk;

		// First block starts right after the (double-aligned) hunk header
		size_t l = ROUNDUP(sizeof(MemSmallHunk), sizeof(double));
		block = (MemBlock*) ((UCHAR*) hunk + l);
		hunk->spaceRemaining = minAllocation - length - l;
		hunk->memory = (UCHAR*) block + length;
		block->length = length;

		return block;
	}

	/*
	 *  OK, we've got a "big block" on our hands.  To maximize confusion, the
	 *  indicated length of a free big block is the length of MemHeader plus
	 *  body, explicitly excluding the MemFreeBlock and MemBigHeader fields.
	 *
	 *                     [MemHeader::length]
	 *
	 *                     <---- MemBlock ---->
	 *
	 *     *--------------*-----------*---------*
	 *     | MemBigHeader | MemHeader |  Body   |
	 *     *--------------*-----------*---------*
	 *
	 *                     <---- MemBigObject ----->
	 *
	 *     *--------------*-----------*---------------*
	 *     | MemBigHeader | MemHeader | MemFreeBlock  |
	 *     *--------------*-----------*---------------*
	 *
	 *     <--------------- MemFreeBlock ------------->
	 */

	// Best-fit scan: the free list is ordered by ascending size, so the
	// first block large enough is the tightest fit
	MemFreeBlock* freeBlock;

	for (freeBlock = freeBlocks.nextLarger; freeBlock != &freeBlocks; freeBlock = freeBlock->nextLarger)
	{
		if (freeBlock->memHeader.length >= length)
		{
			remove(freeBlock);
			MemBlock* block = (MemBlock*) &freeBlock->memHeader;

			// Compute length (MemHeader + body) for new free block

			unsigned int tail = block->length - length;

			// If there isn't room to split off a new free block, allocate the whole thing

			if (tail < sizeof(MemFreeBlock))
			{
				block->pool = this;
				return block;
			}

			// Otherwise, chop up the block

			MemBigObject* newBlock = freeBlock;
			freeBlock = (MemFreeBlock*) ((UCHAR*) block + length);
			freeBlock->memHeader.length = tail - sizeof(MemBigObject);
			block->length = length;
			block->pool = this;

			// Splice the remainder into the physical neighbor chain
			// (assignment in condition is intentional)
			if (freeBlock->next = newBlock->next)
				freeBlock->next->prior = freeBlock;

			newBlock->next = freeBlock;
			freeBlock->prior = newBlock;
			freeBlock->memHeader.pool = NULL;	// indicate block is free
			insert(freeBlock);

			return block;
		}
	}

	// Didn't find existing space -- allocate new hunk

	size_t hunkLength = sizeof (MemBigHunk) + sizeof(MemBigHeader) + length;
	size_t freeSpace = 0;

	// If the hunk size is sufficiently below minAllocation, allocate extra space

	if (hunkLength + sizeof(MemBigObject) + threshold < minAllocation)
	{
		hunkLength = minAllocation;
		//freeSpace = hunkLength - 2 * sizeof(MemBigObject) - length;
		freeSpace = hunkLength - sizeof(MemBigHunk) - 2 * sizeof(MemBigHeader) - length;
	}

	// Allocate the new hunk

	MemBigHunk* hunk = (MemBigHunk*) allocRaw(hunkLength);
	hunk->nextHunk = bigHunks;
	bigHunks = hunk;
	hunk->length = hunkLength;

	// Create the new block

	MemBigObject* newBlock = (MemBigObject*) &hunk->blocks;
	newBlock->prior = NULL;
	newBlock->next = NULL;

	MemBlock* block = (MemBlock*) &newBlock->memHeader;
	block->pool = this;
	block->length = length;

	// If there is space left over, create a free block

	if (freeSpace)
	{
		freeBlock = (MemFreeBlock*) ((UCHAR*) block + length);
		freeBlock->memHeader.length = freeSpace;
		freeBlock->memHeader.pool = NULL;
		freeBlock->next = NULL;
		freeBlock->prior = newBlock;
		newBlock->next = freeBlock;
		insert(freeBlock);
	}

	return block;
}
|
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
// Public allocation entry point: rounds the request up to the pool's
// granularity, obtains a MemBlock from alloc() and hands back a pointer
// to its body. Throws std::bad_alloc (via alloc) on exhaustion.
void* MemoryPool::allocate(size_t size
#ifdef DEBUG_GDS_ALLOC
	, const char* fileName, int line
#endif
) throw (std::bad_alloc)
{
	// Total block size: rounded body (+ Valgrind redzone) + header + guard bytes
	size_t length = ROUNDUP(size + VALGRIND_REDZONE, roundingSize) + OFFSET(MemBlock*, body) + GUARD_BYTES;
	MemBlock* memory = alloc(length);

#ifdef USE_VALGRIND
	VALGRIND_MEMPOOL_ALLOC(this, &memory->body, size);
#endif

	memory->pool = this;

#ifdef DEBUG_GDS_ALLOC
	memory->fileName = fileName;
	memory->lineNumber = line;
	// Poison the fresh body and fill the tail with guard bytes so that
	// use-before-init and overruns can be detected at release time
	memset (&memory->body, INIT_BYTE, size);
	memset (&memory->body + size, GUARD_BYTE, memory->length - size - OFFSET(MemBlock*,body));
#endif

	++blocksAllocated;
	++blocksActive;

	increment_usage(memory->length);

	return &memory->body;
}
|
|
|
|
|
|
|
|
|
|
|
|
// Static release entry point: maps the body pointer back to its MemBlock
// header, finds the owning pool and returns the block to it. NULL is a
// harmless no-op. Under Valgrind, freed blocks first pass through a
// protected delayed-free queue so stale accesses are reported.
void MemoryPool::release(void* object) throw ()
{
	if (object)
	{
		// Step back from the body to the MemBlock header
		MemBlock* block = (MemBlock*) ((UCHAR*) object - OFFSET(MemBlock*, body));
		MemoryPool* pool = block->pool;

#ifdef USE_VALGRIND
		// Synchronize delayed free queue using pool mutex
		MutexLockGuard guard (pool->mutex, "MemoryPool::deallocate USE_VALGRIND");

		// Notify Valgrind that block is freed from the pool
		VALGRIND_MEMPOOL_FREE(pool, object);

		// block is placed in delayed buffer - mark as NOACCESS for that time
		VALGRIND_DISCARD(VALGRIND_MAKE_NOACCESS(block, OFFSET(MemBlock*, body)));

		// Extend circular buffer if possible
		if (pool->delayedFreeCount < FB_NELEM(pool->delayedFree))
		{
			pool->delayedFree[pool->delayedFreeCount] = block;
			pool->delayedFreeCount++;
			return;
		}

		// Shift circular buffer pushing out oldest item
		MemBlock* requested_block = block;

		// From here on we actually free the OLDEST queued block, not the
		// one just passed in
		block = pool->delayedFree[pool->delayedFreePos];
		object = &block->body;

		// Re-enable access to MemBlock
		VALGRIND_DISCARD(VALGRIND_MAKE_MEM_DEFINED(block, OFFSET(MemBlock*, body)));

		// Remove protection from memory block
#ifdef VALGRIND_FIX_IT
		VALGRIND_DISCARD(
			VALGRIND_MAKE_MEM_DEFINED(object, block->length - VALGRIND_REDZONE));
#else
		VALGRIND_DISCARD(
			VALGRIND_MAKE_WRITABLE(object, block->length - VALGRIND_REDZONE));
#endif

		// Replace element in circular buffer
		pool->delayedFree[pool->delayedFreePos] = requested_block;

		// Move queue pointer to next element and cycle if needed
		if (++(pool->delayedFreePos) >= FB_NELEM(pool->delayedFree))
			pool->delayedFreePos = 0;
#endif

		// Save size before releaseBlock() may recycle the header
		size_t size = block->length;
#ifdef DEBUG_GDS_ALLOC
		block->fileName = NULL;
#endif
		pool->releaseBlock(block);
		pool->decrement_usage(size);
	}
}
|
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
// Return a MemBlock to this pool. Small blocks are pushed onto the per-size
// free list (lock-free CAS when the pool is thread-shared); large blocks are
// coalesced with free physical neighbors and, if the whole hunk becomes
// empty, the hunk is unlinked and given back to the OS.
void MemoryPool::releaseBlock(MemBlock *block) throw ()
{
	// Pool already being torn down - nothing to return the block to
	if (!freeObjects)
		return;

	if (block->pool != this)
		corrupt("bad block released");

#ifdef MEM_DEBUG
	// Verify the guard bytes written by allocate() are intact
	for (const UCHAR* end = (UCHAR*) block + block->length, *p = end - GUARD_BYTES; p < end;)
	{
		if (*p++ != GUARD_BYTE)
			corrupt("guard bytes overwritten");
	}
#endif

	--blocksActive;
	const size_t length = block->length;

	// If length is less than threshold, this is a small block

	if (length <= threshold)
	{
#ifdef MEM_DEBUG
		memset (&block->body, DEL_BYTE, length - OFFSET(MemBlock*, body));
#endif

		if (threadShared)
		{
			// Lock-free push: retry CAS until the head swap succeeds
			for (int slot = length / roundingSize;;)
			{
				block->next = freeObjects[slot];

				if (freeObjects[slot].compareExchange(block->next, block))
					return;
			}
		}

		// Single-threaded pool: chain through the pool field (mirrors the
		// pop in alloc())
		int slot = length / roundingSize;
		void* next = freeObjects[slot];
		block->pool = (MemoryPool*) next;
		freeObjects[slot] = block;

		return;
	}

	// OK, this is a large block.  Try recombining with neighbors

	MutexLockGuard guard(mutex, "MemoryPool::release");

#ifdef MEM_DEBUG
	memset (&block->body, DEL_BYTE, length - OFFSET(MemBlock*, body));
#endif

	MemFreeBlock *freeBlock = (MemFreeBlock*) ((UCHAR*) block - sizeof (MemBigHeader));
	block->pool = NULL;	// NULL pool marks the block as free

	// Merge with a free successor (assignment in conditions is intentional)
	if (freeBlock->next && !freeBlock->next->memHeader.pool)
	{
		MemFreeBlock *next = (MemFreeBlock*) (freeBlock->next);
		remove (next);
		freeBlock->memHeader.length += next->memHeader.length + sizeof (MemBigHeader);

		if (freeBlock->next = next->next)
			freeBlock->next->prior = freeBlock;
	}

	// Merge into a free predecessor
	if (freeBlock->prior && !freeBlock->prior->memHeader.pool)
	{
		MemFreeBlock *prior = (MemFreeBlock*) (freeBlock->prior);
		remove (prior);
		prior->memHeader.length += freeBlock->memHeader.length + sizeof (MemBigHeader);

		if (prior->next = freeBlock->next)
			prior->next->prior = prior;

		freeBlock = prior;
	}

	// If the block has no neighbors, the entire hunk is empty and can be unlinked and
	// released

	if (freeBlock->prior == NULL && freeBlock->next == NULL)
	{
		for (MemBigHunk **ptr = &bigHunks, *hunk; hunk = *ptr; ptr = &hunk->nextHunk)
		{
			if (&hunk->blocks == freeBlock)
			{
				*ptr = hunk->nextHunk;
				decrement_mapping(hunk->length);
				releaseRaw(hunk, hunk->length);
				return;
			}
		}

		corrupt("can't find big hunk");
	}

	insert (freeBlock);
}
|
2004-08-09 03:24:54 +02:00
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
// Report pool corruption. In a development build the message is written to
// stderr and the process aborts immediately; release builds ignore the call.
void MemoryPool::corrupt(const char* text) throw ()
{
#ifdef DEV_BUILD
	fputs(text, stderr);
	fputc('\n', stderr);
	abort();
#endif
}
|
2008-02-20 16:25:20 +01:00
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
// Out-of-memory handler: raises Firebird's bad-alloc exception
// (declared to throw std::bad_alloc so callers can rely on the type).
void MemoryPool::memoryIsExhausted(void) throw (std::bad_alloc)
{
	Firebird::BadAlloc::raise();
}
|
2008-02-20 16:25:20 +01:00
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
// Unlink a free block from the large-block free list. The primary list is
// ordered by size; blocks of equal size hang off a primary member in a
// circular "twin" list (a twin is recognizable by its NULL nextLarger).
void MemoryPool::remove(MemFreeBlock* block) throw ()
{
	// If this is junk (too small to be list-managed), chop it out and be done with it

	if (block->memHeader.length < threshold)
		return;

	// If we're a twin, take out of the twin list

	if (!block->nextLarger)
	{
		block->nextTwin->priorTwin = block->priorTwin;
		block->priorTwin->nextTwin = block->nextTwin;
		return;
	}

	// We're in the primary list.  If we have twin, move him in

	MemFreeBlock *twin = block->nextTwin;

	if (twin != block)
	{
		// Promote the twin into our slot in both the twin ring and the
		// size-ordered primary list
		block->priorTwin->nextTwin = twin;
		twin->priorTwin = block->priorTwin;
		twin->priorSmaller = block->priorSmaller;
		twin->nextLarger = block->nextLarger;
		twin->priorSmaller->nextLarger = twin;
		twin->nextLarger->priorSmaller = twin;
		return;
	}

	// No twins.  Just take the guy out of the list

	block->priorSmaller->nextLarger = block->nextLarger;
	block->nextLarger->priorSmaller = block->priorSmaller;
}
|
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
// Link a free block into the pool's size-ordered free list (see remove()
// for a description of the primary list / twin-ring structure).
void MemoryPool::insert(MemFreeBlock* freeBlock) throw ()
{
	// If this is junk (too small for pool), it is not tracked in the free
	// list at all — small fragments are simply abandoned until the
	// surrounding block is coalesced.
	if (freeBlock->memHeader.length < threshold)
		return;

	// Start by finding insertion point: first primary-list member whose
	// length exceeds ours.
	MemFreeBlock *block;

	for (block = freeBlocks.nextLarger;
		 block != &freeBlocks && freeBlock->memHeader.length >= block->memHeader.length;
		 block = block->nextLarger)
	{
		if (block->memHeader.length == freeBlock->memHeader.length)
		{
			// If this is a "twin" (same size block), hang off block's
			// circular twin ring instead of entering the primary list.
			freeBlock->nextTwin = block->nextTwin;
			freeBlock->nextTwin->priorTwin = freeBlock;
			freeBlock->priorTwin = block;
			block->nextTwin = freeBlock;
			// nextLarger == NULL marks this block as a non-representative
			// twin; remove() relies on this flag.
			freeBlock->nextLarger = NULL;
			return;
		}
	}

	// OK, then, link in before the first larger block (works for the
	// end-of-list case too, since "block" is then the list head sentinel).
	freeBlock->nextLarger = block;
	freeBlock->priorSmaller = block->priorSmaller;
	block->priorSmaller->nextLarger = freeBlock;
	block->priorSmaller = freeBlock;

	// Fresh primary-list member: its twin ring contains only itself.
	freeBlock->nextTwin = freeBlock->priorTwin = freeBlock;
}
|
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
namespace {
|
2005-12-24 08:28:31 +01:00
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
// We cache this amount of extents to avoid memory mapping overhead
|
|
|
|
const int MAP_CACHE_SIZE = 16; // == 1 MB
|
2004-10-25 07:14:12 +02:00
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
Vector<void*, MAP_CACHE_SIZE> extents_cache;
|
|
|
|
|
|
|
|
volatile size_t map_page_size = 0;
|
|
|
|
int dev_zero_fd = 0;
|
|
|
|
|
|
|
|
#if defined(WIN_NT)
|
|
|
|
size_t get_page_size()
|
2004-03-28 11:10:30 +02:00
|
|
|
{
|
2009-11-03 14:30:37 +01:00
|
|
|
SYSTEM_INFO info;
|
|
|
|
GetSystemInfo(&info);
|
|
|
|
return info.dwPageSize;
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
size_t get_page_size()
|
|
|
|
{
|
|
|
|
return sysconf(_SC_PAGESIZE);
|
|
|
|
}
|
|
|
|
#endif
|
2008-02-20 16:25:20 +01:00
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
inline size_t get_map_page_size()
|
|
|
|
{
|
|
|
|
if (! map_page_size)
|
2009-07-12 19:37:55 +02:00
|
|
|
{
|
2009-11-03 14:30:37 +01:00
|
|
|
MutexLockGuard guard(*cache_mutex);
|
|
|
|
if (! map_page_size)
|
|
|
|
map_page_size = get_page_size();
|
2004-03-26 00:12:50 +01:00
|
|
|
}
|
2009-11-03 14:30:37 +01:00
|
|
|
return map_page_size;
|
|
|
|
}
|
|
|
|
|
2002-12-16 19:33:54 +01:00
|
|
|
}
|
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
// Allocate "size" bytes of raw memory directly from the OS (VirtualAlloc on
// Windows, mmap elsewhere). Standard-sized extents are recycled through
// extents_cache to avoid mapping overhead. Never returns NULL in practice:
// on failure memoryIsExhausted() raises BadAlloc.
void* MemoryPool::allocRaw(size_t size) throw (std::bad_alloc)
{
#ifndef USE_VALGRIND
	// Fast path: reuse a cached extent of the default size, if any.
	// (Disabled under Valgrind so every extent gets a fresh mapping.)
	if (size == DEFAULT_ALLOCATION)
	{
		MutexLockGuard guard(*cache_mutex);
		void* result = NULL;
		if (extents_cache.getCount())
		{
			// Use most recently used object to encourage caching
			result = extents_cache[extents_cache.getCount() - 1];
			extents_cache.shrink(extents_cache.getCount() - 1);
		}
		if (result) {
			return result;
		}
	}
#endif

	// OS mapping primitives work in whole pages.
	size = FB_ALIGN(size, get_map_page_size());

#ifdef WIN_NT

	void* result = VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE);
	if (!result)

#else // WIN_NT

#ifdef MAP_ANONYMOUS

#ifdef SOLARIS
// Solaris spells the flag MAP_ANON.
#define FB_MMAP_FLAGS MAP_PRIVATE | MAP_ANON
#else // SOLARIS
#define FB_MMAP_FLAGS MAP_PRIVATE | MAP_ANONYMOUS
#endif // SOLARIS

	void* result = mmap(NULL, size, PROT_READ | PROT_WRITE, FB_MMAP_FLAGS, -1, 0);

#else // MAP_ANONYMOUS

	// No anonymous mappings on this platform: map /dev/zero instead.
	// The descriptor is cached across calls (dev_zero_fd starts invalid).
	if (dev_zero_fd < 0)
		dev_zero_fd = open("/dev/zero", O_RDWR);
	void* result = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, dev_zero_fd, 0);

#endif // MAP_ANONYMOUS

	if (result == MAP_FAILED)

#endif // WIN_NT

	{
		// failure happens!
		memoryIsExhausted();
		return NULL;
	}

#ifdef USE_VALGRIND
	// Let Valgrind forget that block was zero-initialized
	VALGRIND_DISCARD(VALGRIND_MAKE_WRITABLE(result, size));
#endif

	// Account the new mapping in the pool statistics.
	increment_mapping(size);
	return result;
}
|
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
void MemoryPool::validateFreeList(void) throw ()
|
|
|
|
{
|
|
|
|
size_t len = 0;
|
|
|
|
int count = 0;
|
|
|
|
MemFreeBlock *block;
|
2009-11-04 01:20:56 +01:00
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
for (block = freeBlocks.nextLarger; block != &freeBlocks; block = block->nextLarger)
|
2009-11-04 01:20:56 +01:00
|
|
|
{
|
2009-11-03 14:30:37 +01:00
|
|
|
if (block->memHeader.length <= len)
|
|
|
|
corrupt ("bad free list\n");
|
|
|
|
len = block->memHeader.length;
|
|
|
|
++count;
|
2009-11-04 01:20:56 +01:00
|
|
|
}
|
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
len += 1;
|
2009-11-04 01:20:56 +01:00
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
for (block = freeBlocks.priorSmaller; block != &freeBlocks; block = block->priorSmaller)
|
2009-11-04 01:20:56 +01:00
|
|
|
{
|
2009-11-03 14:30:37 +01:00
|
|
|
if (block->memHeader.length >= len)
|
|
|
|
corrupt ("bad free list\n");
|
|
|
|
len = block->memHeader.length;
|
2009-11-04 01:20:56 +01:00
|
|
|
}
|
2009-11-03 14:30:37 +01:00
|
|
|
}
|
2004-08-22 23:28:19 +02:00
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
// Consistency check for a big-hunk object: a block must be physically
// adjacent in memory to both of its list neighbors (prior ends exactly
// where this block starts; this block ends exactly where next starts).
void MemoryPool::validateBigBlock(MemBigObject* block) throw ()
{
	MemBigObject* neighbor = block->prior;

	if (neighbor)
	{
		const UCHAR* priorEnd = (UCHAR*) &neighbor->memHeader + neighbor->memHeader.length;
		if (priorEnd != (UCHAR*) block)
			corrupt ("bad neighbors");
	}

	neighbor = block->next;

	if (neighbor)
	{
		const UCHAR* blockEnd = (UCHAR*) &block->memHeader + block->memHeader.length;
		if (blockEnd != (UCHAR*) neighbor)
			corrupt ("bad neighbors");
	}
}
|
|
|
|
|
|
|
|
// Return a raw extent to the OS (VirtualFree / munmap). Default-sized
// extents are parked in extents_cache for reuse when there is room; under
// Valgrind, extents freed during pool destruction pass through a delayed-free
// circular buffer so stale pointers into them are still diagnosed.
void MemoryPool::releaseRaw(void *block, size_t size) throw ()
{
#ifndef USE_VALGRIND
	// Fast path: keep a default-sized extent for later reuse instead of
	// unmapping it, capacity permitting.
	if (size == DEFAULT_ALLOCATION)
	{
		MutexLockGuard guard(*cache_mutex);
		if (extents_cache.getCount() < extents_cache.getCapacity())
		{
			extents_cache.add(block);
			return;
		}
	}
#else
	// Set access protection for block to prevent memory from deleted pool being accessed
	int handle = /* //VALGRIND_MAKE_NOACCESS */ VALGRIND_MAKE_MEM_DEFINED(block, size);

	size = FB_ALIGN(size, get_map_page_size());

	// The extent we will actually unmap below; may be swapped for an older
	// queued extent when the delayed-free buffer is full.
	void* unmapBlockPtr = block;
	size_t unmapBlockSize = size;

	// Employ extents delayed free logic only when pool is destroying.
	// In normal case all blocks pass through queue of sufficent length by themselves
	if (pool_destroying)
	{
		// Synchronize delayed free queue using extents mutex
		MutexLockGuard guard(*cache_mutex);

		// Extend circular buffer if possible
		if (delayedExtentCount < FB_NELEM(delayedExtents))
		{
			DelayedExtent* item = &delayedExtents[delayedExtentCount];
			item->memory = block;
			item->size = size;
			item->handle = handle;
			delayedExtentCount++;
			return;
		}

		// Buffer full: evict the oldest entry and queue this block in its place.
		DelayedExtent* item = &delayedExtents[delayedExtentsPos];

		// Free message associated with old extent in Valgrind
		VALGRIND_DISCARD(item->handle);

		// Set up the block we are going to unmap
		unmapBlockPtr = item->memory;
		unmapBlockSize = item->size;

		// Replace element in circular buffer
		item->memory = block;
		item->handle = handle;
		item->size = size;

		// Move queue pointer to next element and cycle if needed
		delayedExtentsPos++;
		if (delayedExtentsPos >= FB_NELEM(delayedExtents))
			delayedExtentsPos = 0;
	}
	else
	{
		// Let Valgrind forget about unmapped block
		VALGRIND_DISCARD(handle);
	}
#endif

	// NOTE(review): under USE_VALGRIND the unmapBlockPtr/unmapBlockSize
	// computed above are not used by the unmap calls below, which operate on
	// "block"/"size" — verify whether the evicted delayed extent (rather than
	// the freshly queued block) ought to be the one released here.
	size = FB_ALIGN(size, get_map_page_size());
#ifdef WIN_NT
	if (!VirtualFree(block, 0, MEM_RELEASE))
#else // WIN_NT
#if (defined SOLARIS) && (defined HAVE_CADDR_T)
	if (munmap((caddr_t) block, size))
#else
	if (munmap(block, size))
#endif
#endif // WIN_NT
		corrupt ("OS memory deallocation error");
}
|
2009-07-14 03:18:17 +02:00
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
// Static entry point matching globalAlloc: forwards to deallocate().
void MemoryPool::globalFree(void* block) throw ()
{
	deallocate(block);
}
|
2008-02-20 16:25:20 +01:00
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
// Allocate "size" bytes from this pool and zero-fill them (pool-level
// analogue of ::calloc). In DEBUG_GDS_ALLOC builds the caller's location
// is threaded through to allocate() for leak reporting.
// NOTE(review): size is narrowed to int before the allocate() call —
// presumably fine for the sizes pools serve, but requests >= 2GB would be
// truncated on most platforms; confirm against allocate()'s signature.
void* MemoryPool::calloc(size_t size
#ifdef DEBUG_GDS_ALLOC
	, const char* fileName, int line
#endif
	) throw (std::bad_alloc)
{
	void *block = allocate((int) size
#ifdef DEBUG_GDS_ALLOC
		, fileName, line
#endif
		);
	memset (block, 0, size);

	return block;
}
|
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
// Return a block previously obtained from this pool; thin wrapper over
// release().
void MemoryPool::deallocate(void* block) throw ()
{
	release(block);
}
|
2004-08-10 06:10:47 +02:00
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
// Destroy a pool created via the pool factory; counterpart of createPool.
void MemoryPool::deletePool(MemoryPool* pool)
{
	delete pool;
}
|
2004-08-10 06:10:47 +02:00
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
// Sanity-check a small-object free list: free MemBlocks are chained through
// their "pool" member, and each block parked in freeObjects[slot] must have
// length == slot * roundingSize.
// NOTE(review): only slot 3 is walked here — looks like a leftover from
// chasing a specific corruption; confirm whether all slots should be checked.
void MemoryPool::validate(void) throw ()
{
	unsigned int slot = 3;

	for (const MemBlock* block = freeObjects [slot]; block; block = (MemBlock*) block->pool)
	{
		if (slot != block->length / roundingSize)
			corrupt("length trashed for block in slot");
	}
}
|
2008-12-05 02:20:14 +01:00
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
void MemoryPool::print_contents(const char* filename, bool used_only, const char* filter_path) throw ()
|
2004-03-28 11:10:30 +02:00
|
|
|
{
|
2009-11-03 14:30:37 +01:00
|
|
|
FILE* out = fopen(filename, "w");
|
|
|
|
if (!out)
|
2004-08-10 06:10:47 +02:00
|
|
|
return;
|
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
print_contents(out, used_only, filter_path);
|
|
|
|
fclose(out);
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef MEM_DEBUG
|
|
|
|
static void print_block(bool used, FILE* file, MemHeader* blk, bool used_only,
|
|
|
|
const char* filter_path, const size_t filter_len) throw ()
|
|
|
|
{
|
|
|
|
if (used || !used_only)
|
2004-08-22 23:28:19 +02:00
|
|
|
{
|
2009-11-03 14:30:37 +01:00
|
|
|
bool filter = filter_path != NULL;
|
2009-11-04 01:20:56 +01:00
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
if (used && filter && blk->fileName)
|
|
|
|
filter = strncmp(filter_path, blk->fileName, filter_len) != 0;
|
2009-11-04 01:20:56 +01:00
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
if (!filter)
|
2003-01-03 17:03:30 +01:00
|
|
|
{
|
2009-11-03 14:30:37 +01:00
|
|
|
if (used)
|
2009-11-04 01:20:56 +01:00
|
|
|
{
|
2011-01-21 18:18:40 +01:00
|
|
|
fprintf(file, "USED %p: size=%" SIZEFORMAT " allocated at %s:%d\n",
|
|
|
|
blk, blk->length, blk->fileName, blk->lineNumber);
|
2009-11-04 01:20:56 +01:00
|
|
|
}
|
2009-11-03 14:30:37 +01:00
|
|
|
else
|
2011-01-21 18:18:40 +01:00
|
|
|
fprintf(file, "FREE %p: size=%" SIZEFORMAT "\n", blk, blk->length);
|
2002-12-14 22:43:18 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2009-11-03 14:30:37 +01:00
|
|
|
#endif
|
2002-12-14 22:43:18 +01:00
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
// This member function can't be const because there are calls to the mutex.
// Dump every block of every hunk (small and big) belonging to this pool to
// "file", one line per block via print_block(). Compiled away to a no-op
// unless MEM_DEBUG is defined.
void MemoryPool::print_contents(FILE* file, bool used_only, const char* filter_path) throw ()
{
#ifdef MEM_DEBUG
	MutexLockGuard guard(mutex, "MemoryPool::print_contents");

	fprintf(file, "********* Printing contents of pool %p used=%ld mapped=%ld\n",
		this, (long)used_memory.value(), (long)mapped_memory.value());

	// Path filtering only makes sense for used blocks, which carry an
	// allocation fileName; drop the filter when free blocks are included.
	if (!used_only)
	{
		filter_path = NULL;
	}
	const size_t filter_len = filter_path ? strlen(filter_path) : 0;

	// Print small hunks
	for (MemSmallHunk* hunk = smallHunks; hunk; hunk = hunk->nextHunk)
	{
		// Blocks start right after the (double-aligned) hunk header and are
		// laid out back-to-back up to hunk->memory.
		int l = ROUNDUP(sizeof (MemSmallHunk), sizeof (double));
		UCHAR* ptr = ((UCHAR*) hunk) + l;
		size_t used = hunk->length - hunk->spaceRemaining;
		fprintf(file, "\nSmall hunk %p size=%ld used=%ld remain=%ld\n",
			hunk, hunk->length, used, hunk->spaceRemaining);
		while (ptr < hunk->memory)
		{
			MemHeader* m = (MemHeader*)ptr;
			// A non-NULL fileName marks the block as in use — presumably
			// cleared on release; confirm against the release path.
			print_block(m->fileName != NULL, file, m, used_only, filter_path, filter_len);
			ptr += m->length;
		}
	}

	// Print big hunks
	for (MemBigHunk* hunk = bigHunks; hunk; hunk = hunk->nextHunk)
	{
		fprintf(file, "\nBig hunk %p size=%ld\n", hunk, hunk->length);
		for (MemBigObject* block = (MemBigObject*) &hunk->blocks; block; block = block->next)
		{
			// For big objects, a non-NULL owning pool marks the block as used.
			print_block(block->memHeader.pool != NULL, file, &block->memHeader, used_only, filter_path, filter_len);
		}
	}
#endif
}
|
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
// Declare thread-specific variable for context memory pool
|
|
|
|
TLS_DECLARE(MemoryPool*, contextPool);
|
2003-01-07 17:35:10 +01:00
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
// Install newPool as the calling thread's context pool and return the
// previous one so the caller can restore it later.
MemoryPool* MemoryPool::setContextPool(MemoryPool* newPool)
{
	MemoryPool* const old = TLS_GET(contextPool);
	TLS_SET(contextPool, newPool);
	return old;
}
|
2008-02-20 16:25:20 +01:00
|
|
|
|
2009-11-03 14:30:37 +01:00
|
|
|
// Return the calling thread's context pool (may be NULL if none was set).
MemoryPool* MemoryPool::getContextPool()
{
	return TLS_GET(contextPool);
}
|
|
|
|
|
2008-04-19 13:11:10 +02:00
|
|
|
// Pool used by default-constructed AutoStorage objects: the thread's
// context pool when one is set, otherwise the process default pool.
MemoryPool& AutoStorage::getAutoMemoryPool()
{
	MemoryPool* pool = MemoryPool::getContextPool();

	if (!pool)
		pool = getDefaultMemoryPool();

	fb_assert(pool);
	return *pool;
}
|
|
|
|
|
2009-07-30 15:27:38 +02:00
|
|
|
#ifdef LIBC_CALLS_NEW
|
2009-08-01 20:41:46 +02:00
|
|
|
void* MemoryPool::globalAlloc(size_t s) THROW_BAD_ALLOC
|
2009-07-30 15:27:38 +02:00
|
|
|
{
|
|
|
|
if (!processMemoryPool)
|
|
|
|
{
|
|
|
|
// this will do all required init, including processMemoryPool creation
|
|
|
|
static Firebird::InstanceControl dummy;
|
|
|
|
fb_assert(processMemoryPool);
|
|
|
|
}
|
|
|
|
|
|
|
|
return processMemoryPool->allocate(s
|
|
|
|
#ifdef DEBUG_GDS_ALLOC
|
|
|
|
,__FILE__, __LINE__
|
|
|
|
#endif
|
|
|
|
);
|
|
|
|
}
|
|
|
|
#endif // LIBC_CALLS_NEW
|
|
|
|
|
2005-02-16 09:46:48 +01:00
|
|
|
#if defined(DEV_BUILD)
|
2008-04-19 13:11:10 +02:00
|
|
|
void AutoStorage::ProbeStack() const
|
|
|
|
{
|
2005-02-16 09:46:48 +01:00
|
|
|
//
|
2008-12-05 02:20:14 +01:00
|
|
|
// AutoStorage() default constructor can be used only
|
|
|
|
// for objects on the stack. ProbeStack() uses the
|
2005-02-16 09:46:48 +01:00
|
|
|
// following assumptions to check it:
|
|
|
|
// 1. One and only one stack is used for all kind of variables.
|
2009-07-10 17:14:07 +02:00
|
|
|
// 2. Objects don't grow > 128K.
|
2005-02-16 09:46:48 +01:00
|
|
|
//
|
2009-02-05 02:08:13 +01:00
|
|
|
char probeVar = '\0';
|
|
|
|
const char* myStack = &probeVar;
|
|
|
|
const char* thisLocation = (const char*) this;
|
|
|
|
ptrdiff_t distance = thisLocation - myStack;
|
2009-11-03 14:30:37 +01:00
|
|
|
fb_assert(absVal(distance) < 128 * 1024);
|
2005-02-16 09:46:48 +01:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2004-06-14 01:45:02 +02:00
|
|
|
} // namespace Firebird
|