/*
* PROGRAM: Client/Server Common Code
* MODULE: alloc.h
* DESCRIPTION: Memory Pool Manager (based on B+ tree)
*
* The contents of this file are subject to the Initial
* Developer's Public License Version 1.0 (the "License");
* you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
* http://www.ibphoenix.com/main.nfs?a=ibphoenix&page=ibp_idpl.
*
* Software distributed under the License is distributed AS IS,
* WITHOUT WARRANTY OF ANY KIND, either express or implied.
* See the License for the specific language governing rights
* and limitations under the License.
*
* The Original Code was created by Nickolay Samofatov
* for the Firebird Open Source RDBMS project.
*
* STL allocator is based on one by Mike Nordell and John Bellardo
*
* Copyright (c) 2004 Nickolay Samofatov <nickolay@broadviewsoftware.com>
* and all contributors signed below.
*
* All Rights Reserved.
*
* Contributor(s):
*
* Alex Peshkoff <peshkoff@mail.ru>
* added PermanentStorage and AutoStorage classes.
*
*
*/
#ifndef CLASSES_ALLOC_H
#define CLASSES_ALLOC_H
#include "firebird.h"
#include "fb_types.h"
#include <stdio.h>
#include "../jrd/common.h"
#include "../common/classes/fb_atomic.h"
#include "../common/classes/tree.h"
#include "../common/classes/locks.h"
#ifdef HAVE_STDLIB_H
#include <stdlib.h> /* XPG: prototypes for malloc/free have to be in
stdlib.h (EKU) */
#endif
#ifdef _MSC_VER
#define THROW_BAD_ALLOC
#else
#define THROW_BAD_ALLOC throw (Firebird::BadAlloc)
#endif
#ifdef USE_VALGRIND
// Size of the Valgrind red zone applied before and after each memory block allocated for the user
#define VALGRIND_REDZONE 8
// When a memory block is deallocated by the user it must pass through a queue of this
// length before it is actually freed and its access protection removed.
#define DELAYED_FREE_COUNT 1024
// When a memory extent is deallocated during pool destruction it must pass through
// a queue of this length before it is actually returned to the system
#define DELAYED_EXTENT_COUNT 32
#endif
namespace Firebird {
// Maximum number of B+ tree pages kept spare for tree allocation.
// Since we store only unique fragment lengths in our tree, there
// shouldn't be more than 16K elements in it. This is why a MAX_TREE_DEPTH
// of 4 is more than enough
const int MAX_TREE_DEPTH = 4;
// Alignment for all memory blocks. Sizes of memory blocks in headers are measured in these units
const size_t ALLOC_ALIGNMENT = ALIGNMENT;
static inline size_t MEM_ALIGN(size_t value) {
return FB_ALIGN(value, ALLOC_ALIGNMENT);
}
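// A quick sketch of the rounding, assuming ALLOC_ALIGNMENT == 8 (the actual
// value comes from the platform's ALIGNMENT definition):
//
//   MEM_ALIGN(1)  == 8
//   MEM_ALIGN(8)  == 8
//   MEM_ALIGN(13) == 16  // rounded up to the next multiple of 8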
// Flags for memory block
const USHORT MBK_LARGE = 1; // Block is large, allocated from OS directly
const USHORT MBK_PARENT = 2; // Block is allocated from parent pool
const USHORT MBK_USED = 4; // Block is used
const USHORT MBK_LAST = 8; // Block is last in the extent
const USHORT MBK_DELAYED = 16; // Block is pending in the delayed free queue
struct FreeMemoryBlock {
FreeMemoryBlock* fbk_next_fragment;
};
// Block header.
// It occupies 12 bytes on 32-bit targets and 16 bytes on 64-bit ones
struct MemoryBlock {
USHORT mbk_flags;
SSHORT mbk_type;
union {
struct {
// Length and offset are measured in bytes, thus memory extent size is limited to 64K.
// Larger extents are not needed now, but this may be increased later by using allocation units
USHORT mbk_length; // Actual block size: header not included, redirection list is included if applicable
USHORT mbk_prev_length;
} mbk_small;
// Measured in bytes
ULONG mbk_large_length;
};
#ifdef DEBUG_GDS_ALLOC
const char* mbk_file;
int mbk_line;
#endif
union {
class MemoryPool* mbk_pool;
FreeMemoryBlock* mbk_prev_fragment;
};
#if defined(USE_VALGRIND) && (VALGRIND_REDZONE != 0)
const char mbk_valgrind_redzone[VALGRIND_REDZONE];
#endif
};
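// Layout sketch (illustrative, not normative): every allocation returned to
// the user is immediately preceded by its aligned header, which is how
// blk_type() and globalFree() below recover the MemoryBlock from a user pointer:
//
//   | MemoryBlock header | user data ...            |
//   ^                    ^
//   hdr                  ptr == (char*)hdr + MEM_ALIGN(sizeof(MemoryBlock))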
// This structure is appended to the end of a block redirected to the parent pool or
// operating system. It forms a doubly-linked list which we use when the pool is deleted
struct MemoryRedirectList {
MemoryBlock* mrl_prev;
MemoryBlock* mrl_next;
};
const SSHORT TYPE_POOL = -1;
const SSHORT TYPE_EXTENT = -2;
const SSHORT TYPE_LEAFPAGE = -3;
const SSHORT TYPE_TREEPAGE = -4;
// We store BlockInfo structures instead of block header pointers to benefit
// from processor cache-hit optimizations
struct BlockInfo {
size_t bli_length;
FreeMemoryBlock* bli_fragments;
inline static const size_t& generate(const void* sender, const BlockInfo& i) {
return i.bli_length;
}
};
struct MemoryExtent {
MemoryExtent *mxt_next;
MemoryExtent *mxt_prev;
};
struct PendingFreeBlock {
PendingFreeBlock *next;
};
class MemoryStats {
public:
MemoryStats() : mst_usage(0), mst_mapped(0), mst_max_usage(0), mst_max_mapped(0) {}
~MemoryStats() {}
size_t get_current_usage() const { return mst_usage.value(); }
size_t get_maximum_usage() const { return mst_max_usage; }
size_t get_current_mapping() const { return mst_mapped.value(); }
size_t get_maximum_mapping() const { return mst_max_mapped; }
private:
// Forbid copy constructor
MemoryStats(const MemoryStats& object) {}
// Currently allocated memory (without allocator overhead)
// Useful for monitoring engine memory leaks
AtomicCounter mst_usage;
// Amount of memory mapped (including all overheads)
// Useful for monitoring OS memory consumption
AtomicCounter mst_mapped;
// We don't particularly care about extreme precision of these max values,
// this is why we don't synchronize them on Windows
size_t mst_max_usage;
size_t mst_max_mapped;
friend class MemoryPool;
};
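// Reading the counters needs no pool lock; a minimal sketch using the
// process-wide group declared below (MemoryPool::default_stats_group):
//
//   const MemoryStats& st = *MemoryPool::default_stats_group;
//   size_t used   = st.get_current_usage();   // live allocations, sans overhead
//   size_t mapped = st.get_current_mapping(); // mapped memory incl. overhead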
// Memory pool based on B+ tree of free memory blocks
// We are going to have two target architectures:
// 1. Multi-process server with customizable lock manager
// 2. Multi-threaded server with single process (SUPERSERVER)
//
// MemoryPool inheritance looks weird because we cannot use
// any pointers to functions in shared memory. VMT usage in
// MemoryPool and its descendants is prohibited
class MemoryPool {
private:
class InternalAllocator {
public:
void* allocate(size_t size) {
return ((MemoryPool*)this)->tree_alloc(size);
}
void deallocate(void* block) {
((MemoryPool*)this)->tree_free(block);
}
};
typedef BePlusTree<BlockInfo, size_t, InternalAllocator, BlockInfo> FreeBlocksTree;
// We keep most of our structures uninitialized as long as we redirect
// our allocations to the parent pool
bool parent_redirect;
// B+ tree ordered by length
FreeBlocksTree freeBlocks;
MemoryExtent *extents; // Linked list of all memory extents
Vector<void*, 2> spareLeafs;
Vector<void*, MAX_TREE_DEPTH + 1> spareNodes;
bool needSpare;
PendingFreeBlock *pendingFree;
// Synchronization of this object is a little bit tricky. Allocations
// redirected to the parent pool are not protected by our mutex and not
// accounted locally, i.e. redirect_amount and the parent_redirected linked list
// are synchronized with the parent pool mutex only. All other pool members are
// synchronized with this mutex.
Mutex lock;
// Current usage counters for pool. Used to move pool to different statistics group
AtomicCounter used_memory;
size_t mapped_memory;
MemoryPool *parent; // Parent pool. Used to redirect small allocations there
MemoryBlock *parent_redirected, *os_redirected;
size_t redirect_amount; // Amount of memory redirected to parent
// It is protected by parent pool mutex along with redirect list
// Statistics group for the pool
MemoryStats *stats;
#ifdef USE_VALGRIND
// Circular FIFO buffer of read/write protected blocks pending free operation
void* delayedFree[DELAYED_FREE_COUNT];
int delayedFreeHandles[DELAYED_FREE_COUNT];
size_t delayedFreeCount;
size_t delayedFreePos;
#endif
/* Returns NULL in case it cannot allocate requested chunk */
static void* external_alloc(size_t &size);
static void external_free(void* blk, size_t &size, bool pool_destroying);
void* tree_alloc(size_t size);
void tree_free(void* block);
void updateSpare();
inline void addFreeBlock(MemoryBlock* blk);
void removeFreeBlock(MemoryBlock* blk);
void free_blk_extent(MemoryBlock* blk);
// Allocates small block from this pool. Pool must be locked during call
void* internal_alloc(size_t size, SSHORT type = 0
#ifdef DEBUG_GDS_ALLOC
, const char* file = NULL, int line = 0
#endif
);
// Deallocates small block from this pool. Pool must be locked during this call
void internal_deallocate(void* block);
// Forbid copy constructor, should never be called
MemoryPool(const MemoryPool& pool) : freeBlocks((InternalAllocator*)this) { }
// Used by pools to track memory usage.
// These methods are thread-safe because they use atomic counters only
inline void increment_usage(size_t size);
inline void decrement_usage(size_t size);
inline void increment_mapping(size_t size);
inline void decrement_mapping(size_t size);
protected:
// Do not allow pools to be created or destroyed directly from outside
MemoryPool(MemoryPool* _parent, MemoryStats &_stats, void* first_extent, void* root_page);
// This should never be called
~MemoryPool() {
}
// Used to create MemoryPool descendants
static MemoryPool* internal_create(size_t instance_size,
MemoryPool* parent = NULL, MemoryStats& stats = *default_stats_group);
public:
// Default statistics group for process
static MemoryStats* default_stats_group;
// Pool created for process
static MemoryPool* processMemoryPool;
// Create memory pool instance
static MemoryPool* createPool(MemoryPool* parent = NULL, MemoryStats& stats = *default_stats_group) {
return internal_create(sizeof(MemoryPool), parent, stats);
}
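// A minimal usage sketch; deletePool() is declared below and releases the
// pool together with everything still allocated from it:
//
//   MemoryPool* pool = MemoryPool::createPool();
//   void* buf = pool->allocate(256);
//   MemoryPool::deletePool(pool); // buf is reclaimed here as well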
// Set context pool for current thread of execution
static MemoryPool* setContextPool(MemoryPool* newPool);
// Get context pool for current thread of execution
static MemoryPool* getContextPool();
// Set statistics group for pool. Usage counters will be subtracted from the
// previously set group and added to the new one
void setStatsGroup(MemoryStats& stats);
// Deallocate pool and all its contents
static void deletePool(MemoryPool* pool);
// Allocate memory block. Result is not zero-initialized.
// In case of problems this method throws Firebird::BadAlloc
void* allocate(size_t size, SSHORT type = 0
#ifdef DEBUG_GDS_ALLOC
, const char* file = NULL, int line = 0
#endif
);
// Allocate memory block. In case of problems this method returns NULL
void* allocate_nothrow(size_t size, SSHORT type = 0
#ifdef DEBUG_GDS_ALLOC
, const char* file = NULL, int line = 0
#endif
);
void deallocate(void* block);
// Check pool for internal consistency. When enabled, the call is very expensive
bool verify_pool(bool fast_checks_only = false);
// Print out pool contents. This is a debugging routine
void print_contents(FILE*, bool = false, const char* filter_path = 0);
// The same routine, but more easily callable from the debugger
void print_contents(const char* filename, bool = false,
const char* filter_path = 0);
// Deallocate memory block. Pool is derived from block header
static void globalFree(void* block) {
if (block)
((MemoryBlock*)((char*)block - MEM_ALIGN(sizeof(MemoryBlock))))->mbk_pool->deallocate(block);
}
// Allocate zero-initialized block of memory
void* calloc(size_t size, SSHORT type = 0
#ifdef DEBUG_GDS_ALLOC
, const char* file = NULL, int line = 0
#endif
) {
void* result = allocate(size, type
#ifdef DEBUG_GDS_ALLOC
, file, line
#endif
);
memset(result, 0, size);
return result;
}
// Initialize and finalize global memory pool
static void init();
static void cleanup();
/// Returns the type associated with the allocated memory.
static SSHORT blk_type(const void* mem) {
return ((MemoryBlock*)((char *)mem - MEM_ALIGN(sizeof(MemoryBlock))))->mbk_type;
}
/// Returns the pool the memory was allocated from.
//static MemoryPool* blk_pool(const void* mem) {
// return ((MemoryBlock*)((char *)mem - MEM_ALIGN(sizeof(MemoryBlock))))->mbk_pool;
//}
friend class InternalAllocator;
};
// Class intended to manage the execution context pool stack.
// Declare an instance of this class when you need to set a new context pool; it
// will be restored automatically as soon as the holder variable goes out of scope
class ContextPoolHolder {
public:
ContextPoolHolder(MemoryPool* newPool) {
savedPool = MemoryPool::setContextPool(newPool);
}
~ContextPoolHolder() {
MemoryPool::setContextPool(savedPool);
}
private:
MemoryPool* savedPool;
};
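// RAII usage sketch; myPool is a hypothetical MemoryPool*:
//
//   {
//       ContextPoolHolder context(myPool);
//       // allocations that consult MemoryPool::getContextPool() use myPool
//   } // destructor restores the previous context pool here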
// Template enabling common use of old and new pool-control code;
// to be dropped when the old-style code goes away
template <typename SubsystemThreadData, typename SubsystemPool>
class SubsystemContextPoolHolder
: public ContextPoolHolder
{
public:
SubsystemContextPoolHolder <SubsystemThreadData, SubsystemPool>
(
SubsystemThreadData* subThreadData,
SubsystemPool* newPool
)
: ContextPoolHolder(newPool),
savedThreadData(subThreadData),
savedPool(savedThreadData->getDefaultPool())
{
savedThreadData->setDefaultPool(newPool);
}
~SubsystemContextPoolHolder() {
savedThreadData->setDefaultPool(savedPool);
}
private:
SubsystemThreadData* savedThreadData;
SubsystemPool* savedPool;
};
} // namespace Firebird
using Firebird::MemoryPool;
inline static MemoryPool* getDefaultMemoryPool() { return Firebird::MemoryPool::processMemoryPool; }
// Global versions of operators new and delete
inline void* operator new(size_t s) THROW_BAD_ALLOC
{
return getDefaultMemoryPool()->allocate(s, 0
#ifdef DEBUG_GDS_ALLOC
,__FILE__, __LINE__
#endif
);
}
inline void* operator new[](size_t s) THROW_BAD_ALLOC
{
return getDefaultMemoryPool()->allocate(s, 0
#ifdef DEBUG_GDS_ALLOC
,__FILE__, __LINE__
#endif
);
}
inline void* operator new(size_t, void* ptr) throw()
{
return ptr;
}
inline void* operator new[](size_t, void* ptr) throw()
{
return ptr;
}
inline void operator delete(void* mem) throw()
{
Firebird::MemoryPool::globalFree(mem);
}
inline void operator delete[](void* mem) throw()
{
Firebird::MemoryPool::globalFree(mem);
}
#ifdef DEBUG_GDS_ALLOC
inline void* operator new(size_t s, Firebird::MemoryPool& pool, const char* file, int line) {
return pool.allocate(s, 0, file, line);
}
inline void* operator new[](size_t s, Firebird::MemoryPool& pool, const char* file, int line) {
return pool.allocate(s, 0, file, line);
}
#define FB_NEW(pool) new(pool, __FILE__, __LINE__)
#define FB_NEW_RPT(pool, count) new(pool, count, __FILE__, __LINE__)
#else
inline void* operator new(size_t s, Firebird::MemoryPool& pool) {
return pool.allocate(s);
}
inline void* operator new[](size_t s, Firebird::MemoryPool& pool) {
return pool.allocate(s);
}
#define FB_NEW(pool) new(pool)
#define FB_NEW_RPT(pool, count) new(pool, count)
#endif
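// Usage sketch for the macros above; MyNode is a hypothetical class. In debug
// builds the allocating file and line are recorded in the block header:
//
//   MyNode* node = FB_NEW(*getDefaultMemoryPool()) MyNode();
//   delete node; // routed back to the owning pool via MemoryPool::globalFree()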
namespace Firebird
{
// Permanent storage is used as a base class for all objects that
// perform memory allocation in methods other than their
// constructors. Permanent means that the pool which will later
// be used for such allocations must be explicitly passed to
// all constructors of such an object.
class PermanentStorage {
private:
MemoryPool& pool;
protected:
explicit PermanentStorage(MemoryPool& p) : pool(p) { }
MemoryPool& getPool() const { return pool; }
};
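// A minimal sketch of the intended pattern (class and method names are
// illustrative only):
//
//   class Cache : public PermanentStorage {
//   public:
//       explicit Cache(MemoryPool& p) : PermanentStorage(p) { }
//       void* reserve(size_t n) { return getPool().allocate(n); }
//   };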
// Automatic storage is used as a base class for objects that
// may have constructors without an explicit MemoryPool
// parameter. In this case AutoStorage passes the pool from
// getAutoMemoryPool() to PermanentStorage. To keep this operation
// safe, such a trick is possible only for local (on-stack) variables.
class AutoStorage : public PermanentStorage {
private:
#if defined(DEV_BUILD)
void ProbeStack() const;
#endif
public:
static MemoryPool& getAutoMemoryPool();
protected:
AutoStorage() : PermanentStorage(getAutoMemoryPool()) {
#if defined(DEV_BUILD)
ProbeStack();
#endif
}
explicit AutoStorage(MemoryPool& p) : PermanentStorage(p) { }
};
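// Sketch (Tmp is illustrative): such a class may be created on the stack with
// no pool argument, or given an explicit pool like any PermanentStorage:
//
//   class Tmp : public AutoStorage {
//   public:
//       Tmp() { }                                 // stack instance, auto pool
//       explicit Tmp(MemoryPool& p) : AutoStorage(p) { }
//   };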
} // namespace Firebird
#endif // CLASSES_ALLOC_H