8
0
mirror of https://github.com/FirebirdSQL/firebird.git synced 2025-02-01 08:43:02 +01:00
firebird-mirror/src/common/classes/alloc.h

556 lines
14 KiB
C
Raw Normal View History

/*
* PROGRAM: Client/Server Common Code
* MODULE: alloc.h
* DESCRIPTION: Memory Pool Manager (based on B+ tree)
*
* The contents of this file are subject to the Initial
* Developer's Public License Version 1.0 (the "License");
* you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
* http://www.ibphoenix.com/main.nfs?a=ibphoenix&page=ibp_idpl.
*
* Software distributed under the License is distributed AS IS,
* WITHOUT WARRANTY OF ANY KIND, either express or implied.
* See the License for the specific language governing rights
* and limitations under the License.
*
* The Original Code was created by Nickolay Samofatov
* for the Firebird Open Source RDBMS project.
*
* STL allocator is based on one by Mike Nordell and John Bellardo
*
* Copyright (c) 2004 Nickolay Samofatov <nickolay@broadviewsoftware.com>
* and all contributors signed below.
*
* All Rights Reserved.
*
2009-11-03 14:30:37 +01:00
* The Original Code was created by James A. Starkey for IBPhoenix.
*
* Copyright (c) 2004 James A. Starkey
* All Rights Reserved.
*
* Contributor(s):
2008-12-05 01:56:15 +01:00
*
* Alex Peshkoff <peshkoff@mail.ru>
* added PermanentStorage and AutoStorage classes.
2009-11-03 14:30:37 +01:00
* merged parts of Nickolay and Jim code to be used together
*
*/
2004-03-07 08:58:55 +01:00
#ifndef CLASSES_ALLOC_H
#define CLASSES_ALLOC_H
2007-09-26 19:48:20 +02:00
#include "firebird.h"
#include "fb_types.h"

#include "../common/classes/locks.h"
#include "../common/classes/auto.h"
#include "../common/classes/fb_atomic.h"

#include <stdio.h>
#include <new>		// std::bad_alloc, used in the exception specifications below

#if defined(MVS) || defined(__VMS) || defined (DARWIN)
#include <stdlib.h>
#else
#include <malloc.h>
#endif

#include <memory.h>
2009-11-03 14:30:37 +01:00
#undef MEM_DEBUG
#ifdef DEBUG_GDS_ALLOC
#define MEM_DEBUG
#endif
2009-11-03 14:30:37 +01:00
#ifdef USE_VALGRIND
// Size of Valgrind red zone applied before and after memory block allocated for user
2009-11-03 14:30:37 +01:00
#define VALGRIND_REDZONE 0 //8
2008-12-05 01:56:15 +01:00
// When memory block is deallocated by user from the pool it must pass queue of this
// length before it is actually deallocated and access protection from it removed.
2004-07-31 00:38:08 +02:00
#define DELAYED_FREE_COUNT 1024
// When memory extent is deallocated when pool is destroying it must pass through
// queue of this length before it is actually returned to system
#define DELAYED_EXTENT_COUNT 32
2009-11-03 14:30:37 +01:00
#undef MEM_DEBUG // valgrind works instead
#else
#define VALGRIND_REDZONE 8
2004-07-31 00:38:08 +02:00
#endif
2009-11-03 14:30:37 +01:00
namespace Firebird {
// Alignment for all memory blocks. Sizes of memory blocks in headers are measured in this units
const size_t ALLOC_ALIGNMENT = FB_ALIGNMENT;

// Round a byte count up to the next multiple of ALLOC_ALIGNMENT
static inline size_t MEM_ALIGN(size_t value)
{
	return FB_ALIGN(value, ALLOC_ALIGNMENT);
}

// Granularity (bytes) used when rounding small allocation requests
static const unsigned int DEFAULT_ROUNDING = 8;
// Size boundary between the small-block and big-block allocation paths
// (presumably — the actual use is in alloc.cpp; confirm there)
static const unsigned int DEFAULT_CUTOFF = 4096;
// Minimum amount of memory requested from the OS at a time (bytes)
static const size_t DEFAULT_ALLOCATION = 65536;
2009-11-03 14:30:37 +01:00
class MemoryPool;
2008-04-19 13:11:10 +02:00
// Hierarchical memory-usage statistics group. Every change is applied to this
// instance and then propagated up the mst_parent chain, so a parent group
// always reflects the aggregate of its children. Counter updates come from
// MemoryPool (a friend); the getters below are the public read-only view.
class MemoryStats
{
public:
	explicit MemoryStats(MemoryStats* parent = NULL)
		: mst_parent(parent), mst_usage(0), mst_mapped(0), mst_max_usage(0), mst_max_mapped(0)
	{}

	~MemoryStats()
	{}

	size_t getCurrentUsage() const throw () { return mst_usage.value(); }
	size_t getMaximumUsage() const throw () { return mst_max_usage; }
	size_t getCurrentMapping() const throw () { return mst_mapped.value(); }
	size_t getMaximumMapping() const throw () { return mst_max_mapped; }

private:
	// Forbid copying/assignment
	MemoryStats(const MemoryStats&);
	MemoryStats& operator=(const MemoryStats&);

	MemoryStats* mst_parent;

	// Currently allocated memory (without allocator overhead)
	// Useful for monitoring engine memory leaks
	AtomicCounter mst_usage;
	// Amount of memory mapped (including all overheads)
	// Useful for monitoring OS memory consumption
	AtomicCounter mst_mapped;

	// We don't particularly care about extreme precision of these max values,
	// this is why we don't synchronize them
	size_t mst_max_usage;
	size_t mst_max_mapped;

	// These methods are thread-safe due to usage of atomic counters only
	// (the max-value updates above are the acknowledged exception).
	void increment_usage(size_t size) throw ()
	{
		for (MemoryStats* statistics = this; statistics; statistics = statistics->mst_parent)
		{
			const size_t temp = statistics->mst_usage.exchangeAdd(size) + size;
			if (temp > statistics->mst_max_usage)
				statistics->mst_max_usage = temp;
		}
	}

	void decrement_usage(size_t size) throw ()
	{
		for (MemoryStats* statistics = this; statistics; statistics = statistics->mst_parent)
		{
			statistics->mst_usage -= size;
		}
	}

	void increment_mapping(size_t size) throw ()
	{
		for (MemoryStats* statistics = this; statistics; statistics = statistics->mst_parent)
		{
			const size_t temp = statistics->mst_mapped.exchangeAdd(size) + size;
			if (temp > statistics->mst_max_mapped)
				statistics->mst_max_mapped = temp;
		}
	}

	void decrement_mapping(size_t size) throw ()
	{
		for (MemoryStats* statistics = this; statistics; statistics = statistics->mst_parent)
		{
			statistics->mst_mapped -= size;
		}
	}

	// MemoryPool drives the private increment/decrement methods directly
	friend class MemoryPool;
};
2006-05-03 12:50:13 +02:00
2009-11-03 14:30:37 +01:00
// 32-bit signed type used for the debug line-number field below
typedef SLONG INT32;

class MemBlock;

// Header prepended to every allocation served by MemoryPool.
// Layout is accessed by casts in alloc.cpp — do not reorder fields.
class MemHeader
{
public:
	union
	{
		MemoryPool* pool;	// presumably: owning pool while the block is live
		MemBlock* next;		// presumably: free-chain link once released — confirm in alloc.cpp
	};
	size_t length;
#ifdef DEBUG_GDS_ALLOC
	// Call site recorded by the FB_NEW macros in debug builds
	INT32 lineNumber;
	const char *fileName;
#endif
#if defined(USE_VALGRIND) && (VALGRIND_REDZONE != 0)
	// Guard bytes protected via Valgrind client requests
	const char mbk_valgrind_redzone[VALGRIND_REDZONE];
#endif
};
2009-11-04 01:20:56 +01:00
// An allocation as seen by the pool: header immediately followed by user data.
// `body` marks the first byte handed to the caller; the rest follows in memory.
class MemBlock : public MemHeader
{
public:
	UCHAR body;
};
2009-11-03 14:30:37 +01:00
class MemBigObject;

// Doubly-linked list node chaining the big objects inside a MemBigHunk
class MemBigHeader
{
public:
	MemBigObject *next;
	MemBigObject *prior;
};


// A big object: list linkage plus the standard per-allocation header
class MemBigObject : public MemBigHeader
{
public:
	MemHeader memHeader;
};
2003-01-18 19:02:12 +01:00
2008-12-05 01:56:15 +01:00
2009-11-04 01:20:56 +01:00
// Free big block kept in the pool's free structure. The links suggest a
// size-ordered list (nextLarger/priorSmaller) with equal-sized blocks
// chained as "twins" — confirm against the insert/remove code in alloc.cpp.
class MemFreeBlock : public MemBigObject
{
public:
	MemFreeBlock *nextLarger;
	MemFreeBlock *priorSmaller;
	MemFreeBlock *nextTwin;
	MemFreeBlock *priorTwin;
};
2003-01-10 13:27:57 +01:00
2009-11-04 01:20:56 +01:00
// Extent used for small allocations; presumably bump-allocated from
// `memory` while `spaceRemaining` lasts (see alloc.cpp). Hunks are
// singly chained through nextHunk.
class MemSmallHunk
{
public:
	MemSmallHunk *nextHunk;
	size_t length;
	UCHAR *memory;
	size_t spaceRemaining;
};
2009-11-04 01:20:56 +01:00
// Extent holding big objects; `blocks` is the list head for the
// MemBigHeader chain inside this hunk. Hunks are singly chained.
class MemBigHunk
{
public:
	MemBigHunk *nextHunk;
	size_t length;
	MemBigHeader blocks;
};
2008-12-05 01:56:15 +01:00
2009-11-03 14:30:37 +01:00
// General-purpose memory pool. Pools form a hierarchy through `parent` and
// report their consumption to a MemoryStats group. Construction goes through
// createPool(); direct construction is private. Allocation internals live in
// alloc.cpp; this declaration only fixes the interface and member layout.
class MemoryPool
{
private:
	// Private: pools are created via createPool() only
	MemoryPool(MemoryPool& parent, MemoryStats& stats,
		bool shared = true, int rounding = DEFAULT_ROUNDING,
		int cutoff = DEFAULT_CUTOFF, int minAllocation = DEFAULT_ALLOCATION);
	explicit MemoryPool(bool shared = true, int rounding = DEFAULT_ROUNDING,
		int cutoff = DEFAULT_CUTOFF, int minAllocation = DEFAULT_ALLOCATION);
	// Seeds the pool with its initial chunk of memory
	void init(void* memory, size_t length);
	virtual ~MemoryPool(void);

public:
	// Process-wide default pool; backs the global operator new below
	static MemoryPool* defaultMemoryManager;

private:
	size_t roundingSize, threshold, minAllocation;
	//int headerSize;
	typedef AtomicPointer<MemBlock> FreeChainPtr;
	// Free chains for small blocks (presumably indexed by rounded size — see alloc.cpp)
	FreeChainPtr *freeObjects;

	MemBigHunk *bigHunks;
	MemSmallHunk *smallHunks;
	MemFreeBlock freeBlocks;
	MemFreeBlock junk;
	Mutex mutex;			// guards pool state when threadShared is set
	int blocksAllocated;
	int blocksActive;
	bool threadShared;		// Shared across threads, requires locking
	bool pool_destroying;

	// Default statistics group for process
	static MemoryStats* default_stats_group;
	// Statistics group for the pool
	MemoryStats* stats;
	// Parent pool if present
	MemoryPool* parent;

	// Memory used
	AtomicCounter used_memory, mapped_memory;

protected:
	MemBlock* alloc(const size_t length) throw (std::bad_alloc);
	void releaseBlock(MemBlock *block) throw ();

public:
	// Allocate `size` bytes; debug builds record the call site in the block header
	void* allocate(size_t size
#ifdef DEBUG_GDS_ALLOC
		, const char* fileName = NULL, int line = 0
#endif
	) throw (std::bad_alloc);

protected:
	// Diagnostic hook invoked when pool corruption is detected
	void corrupt(const char* text) throw ();

private:
	virtual void memoryIsExhausted(void) throw (std::bad_alloc);
	void remove(MemFreeBlock* block) throw ();
	void insert(MemFreeBlock* block) throw ();
	// Raw extent acquisition/release against the operating system
	void* allocRaw(size_t length) throw (std::bad_alloc);
	void validateFreeList(void) throw ();
	void validateBigBlock(MemBigObject* block) throw ();
	static void release(void* block) throw ();
	void releaseRaw(void *block, size_t size) throw ();

#ifdef USE_VALGRIND
	// Circular FIFO buffer of read/write protected blocks pending free operation
	MemBlock* delayedFree[DELAYED_FREE_COUNT];
	size_t delayedFreeCount;
	size_t delayedFreePos;
#endif

public:
	static void deletePool(MemoryPool* pool);
	static void globalFree(void* block) throw ();

	// Like allocate(), but the returned memory is zero-initialized
	void* calloc(size_t size
#ifdef DEBUG_GDS_ALLOC
		, const char* fileName, int line
#endif
	) throw (std::bad_alloc);
	static void deallocate(void* block) throw ();
	void validate(void) throw ();

#ifdef LIBC_CALLS_NEW
	// Out-of-line when libc itself may re-enter operator new
	static void* globalAlloc(size_t s) throw (std::bad_alloc);
#else
	static void* globalAlloc(size_t s) throw (std::bad_alloc)
	{
		return defaultMemoryManager->allocate(s
#ifdef DEBUG_GDS_ALLOC
			, __FILE__, __LINE__
#endif
		);
	}
#endif // LIBC_CALLS_NEW

	// Create memory pool instance
	static MemoryPool* createPool(MemoryPool* parent = NULL, MemoryStats& stats = *default_stats_group);

	// Set context pool for current thread of execution
	static MemoryPool* setContextPool(MemoryPool* newPool);

	// Get context pool for current thread of execution
	static MemoryPool* getContextPool();

	// Set statistics group for pool. Usage counters will be decremented from
	// previously set group and added to new
	void setStatsGroup(MemoryStats& stats) throw ();

	// Just a helper for AutoPtr.
	static void clear(MemoryPool* pool)
	{
		deletePool(pool);
	}

	// Initialize and finalize global memory pool
	static void init();
	static void cleanup();

	// Statistics: forward to the attached group and keep per-pool totals
	void increment_usage(size_t size) throw ()
	{
		stats->increment_usage(size);
		used_memory += size;
	}

	void decrement_usage(size_t size) throw ()
	{
		stats->decrement_usage(size);
		used_memory -= size;
	}

	void increment_mapping(size_t size) throw ()
	{
		stats->increment_mapping(size);
		mapped_memory += size;
	}

	void decrement_mapping(size_t size) throw ()
	{
		stats->decrement_mapping(size);
		mapped_memory -= size;
	}

	// Print out pool contents. This is debugging routine
	void print_contents(FILE*, bool = false, const char* filter_path = 0) throw ();
	// The same routine, but more easily callable from the debugger
	void print_contents(const char* filename, bool = false, const char* filter_path = 0) throw ();
};
2009-11-03 14:30:37 +01:00
} // namespace Firebird
// Returns the process-wide default pool. Must not be called before
// MemoryPool::init() has set up defaultMemoryManager (asserted in DEV builds).
static inline Firebird::MemoryPool* getDefaultMemoryPool() throw()
{
	Firebird::MemoryPool* const pool = Firebird::MemoryPool::defaultMemoryManager;
	fb_assert(pool);
	return pool;
}
namespace Firebird {
// Class intended to manage execution context pool stack
2008-12-05 01:56:15 +01:00
// Declare instance of this class when you need to set new context pool and it
// will be restored automatically as soon holder variable gets out of scope
2008-04-19 13:11:10 +02:00
class ContextPoolHolder
{
public:
2008-12-24 08:46:11 +01:00
explicit ContextPoolHolder(MemoryPool* newPool)
2008-04-19 13:11:10 +02:00
{
savedPool = MemoryPool::setContextPool(newPool);
}
2008-04-19 13:11:10 +02:00
~ContextPoolHolder()
{
MemoryPool::setContextPool(savedPool);
}
private:
2008-12-05 01:56:15 +01:00
MemoryPool* savedPool;
};
2004-09-07 10:35:29 +02:00
// template enabling common use of old and new pools control code
// to be dropped when old-style code goes away
template <typename SubsystemThreadData, typename SubsystemPool>
2008-12-24 08:46:11 +01:00
class SubsystemContextPoolHolder : public ContextPoolHolder
{
public:
2008-12-05 01:56:15 +01:00
SubsystemContextPoolHolder <SubsystemThreadData, SubsystemPool>
(
2008-12-05 01:56:15 +01:00
SubsystemThreadData* subThreadData,
SubsystemPool* newPool
2008-12-05 01:56:15 +01:00
)
: ContextPoolHolder(newPool),
savedThreadData(subThreadData),
2008-12-05 01:56:15 +01:00
savedPool(savedThreadData->getDefaultPool())
{
savedThreadData->setDefaultPool(newPool);
}
2008-04-19 13:11:10 +02:00
~SubsystemContextPoolHolder()
{
savedThreadData->setDefaultPool(savedPool);
}
private:
SubsystemThreadData* savedThreadData;
SubsystemPool* savedPool;
};
2004-06-13 05:09:29 +02:00
} // namespace Firebird
using Firebird::MemoryPool;

// Global versions of operators new and delete: route every plain
// (non-placement) allocation in the process through the default pool.
inline void* operator new(size_t s) throw (std::bad_alloc)
{
	return MemoryPool::globalAlloc(s);
}

inline void* operator new[](size_t s) throw (std::bad_alloc)
{
	return MemoryPool::globalAlloc(s);
}

inline void operator delete(void* mem) throw()
{
	MemoryPool::globalFree(mem);
}

inline void operator delete[](void* mem) throw()
{
	MemoryPool::globalFree(mem);
}
#ifdef DEBUG_GDS_ALLOC

// Placement forms used by the FB_NEW macros. In debug builds the allocation
// records the requesting source file and line so leaks can be attributed.
inline void* operator new(size_t s, Firebird::MemoryPool& pool, const char* file, int line)
{
	return pool.allocate(s, file, line);
}
inline void* operator new[](size_t s, Firebird::MemoryPool& pool, const char* file, int line)
{
	return pool.allocate(s, file, line);
}
// FB_NEW_RPT presumably matches a (pool, count) placement operator declared
// by repeated-field classes elsewhere — not visible in this header.
#define FB_NEW(pool) new(pool, __FILE__, __LINE__)
#define FB_NEW_RPT(pool, count) new(pool, count, __FILE__, __LINE__)

#else

// Release builds: same placement forms without call-site tracking
inline void* operator new(size_t s, Firebird::MemoryPool& pool)
{
	return pool.allocate(s);
}
inline void* operator new[](size_t s, Firebird::MemoryPool& pool)
{
	return pool.allocate(s);
}
#define FB_NEW(pool) new(pool)
#define FB_NEW_RPT(pool, count) new(pool, count)

#endif
2006-05-20 02:59:22 +02:00
namespace Firebird
{
2008-02-02 18:43:59 +01:00
// Global storage makes it possible to use new and delete for classes,
// based on it, to behave traditionally, i.e. get memory from permanent pool.
2008-04-19 13:11:10 +02:00
class GlobalStorage
{
2008-02-02 18:43:59 +01:00
public:
void* operator new(size_t size)
{
return getDefaultMemoryPool()->allocate(size);
}
void operator delete(void* mem)
{
getDefaultMemoryPool()->deallocate(mem);
}
2008-12-05 01:56:15 +01:00
MemoryPool& getPool() const
{
2008-02-02 18:43:59 +01:00
return *getDefaultMemoryPool();
}
};
2008-12-05 01:56:15 +01:00
2008-02-02 18:43:59 +01:00
// Permanent storage is used as base class for all objects
// performing memory allocation in methods other than the
// constructors of these objects. Permanent means that the pool
// which will later be used for such allocations must be
// explicitly passed to every constructor of such an object.
class PermanentStorage
{
private:
	MemoryPool& pool;
protected:
	explicit PermanentStorage(MemoryPool& p) : pool(p) { }
	MemoryPool& getPool() const { return pool; }
};
// Automatic storage is used as base class for objects
// that may have constructors without an explicit MemoryPool
// parameter. In this case AutoStorage passes the auto pool
// to PermanentStorage. To keep this safe, the trick is
// possible only for local (on stack) variables.
class AutoStorage : public PermanentStorage
{
private:
#if defined(DEV_BUILD)
	// DEV-build check, presumably verifying that `this` lives on the stack
	// (defined in the .cpp — confirm there)
	void ProbeStack() const;
#endif
public:
	static MemoryPool& getAutoMemoryPool();
protected:
	// Default form: bind to the automatic pool; only valid for stack objects
	AutoStorage()
		: PermanentStorage(getAutoMemoryPool())
	{
#if defined(DEV_BUILD)
		ProbeStack();
#endif
	}
	// Explicit-pool form behaves exactly like PermanentStorage
	explicit AutoStorage(MemoryPool& p) : PermanentStorage(p) { }
};

// Owning pointer: deletes the pool via MemoryPool::clear on scope exit
typedef AutoPtr<MemoryPool, MemoryPool> AutoMemoryPool;
2004-06-13 05:09:29 +02:00
} // namespace Firebird
2004-03-07 08:58:55 +01:00
#endif // CLASSES_ALLOC_H