mirror of https://github.com/FirebirdSQL/firebird.git synced 2025-01-24 02:43:03 +01:00
This commit is contained in:
robocop 2008-04-19 11:11:10 +00:00
parent 22dbbc1a08
commit 05271261e1
17 changed files with 480 additions and 304 deletions

View File

@ -35,7 +35,7 @@ namespace Firebird {
class StatusHolder
{
public:
StatusHolder(const ISC_STATUS* status = NULL)
explicit StatusHolder(const ISC_STATUS* status = NULL)
{
memset(m_status_vector, 0, sizeof(m_status_vector));
m_raised = false;
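
The only change here is marking the constructor explicit. A minimal sketch of what that rules out (ISC_STATUS_ARRAY is the usual Firebird status-vector typedef; the variable names are illustrative):

ISC_STATUS_ARRAY vector = {0};
Firebird::StatusHolder holder(vector);     // direct initialization still compiles
// Firebird::StatusHolder oops = vector;   // implicit conversion: rejected once the ctor is explicit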

View File

@ -35,7 +35,8 @@ namespace Firebird {
// Aligns output parameter (i.e. transfers data in destructor).
template <typename C>
class OutAligner {
class OutAligner
{
private:
UCHAR* userBuffer;
#ifdef RISC_ALIGNMENT
@ -81,7 +82,8 @@ public:
// Aligns input parameter.
template <typename C>
class Aligner {
class Aligner
{
private:
#ifdef RISC_ALIGNMENT
Firebird::HalfStaticArray<C, BUFFER_SMALL> localBuffer;
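
For readers unfamiliar with the RISC_ALIGNMENT trick above, a standalone sketch of the underlying idea (this is not the Firebird API; the copy-in/copy-out shown here is what Aligner/OutAligner automate in their constructors and destructors):

#include <cstring>

// Read a value of type C from a byte buffer that may be misaligned on RISC targets.
template <typename C>
C readAligned(const unsigned char* buffer)
{
	C local;                             // properly aligned local copy
	memcpy(&local, buffer, sizeof(C));   // byte-wise copy avoids an unaligned load
	return local;
}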

View File

@ -34,7 +34,8 @@
namespace Firebird {
class MetaName {
class MetaName
{
private:
char data[MAX_SQL_IDENTIFIER_SIZE];
unsigned int count;

View File

@ -182,7 +182,8 @@ inline size_t get_map_page_size()
#ifdef USE_VALGRIND
// Circular FIFO buffer of read/write protected extents pending free operation
// Race protected via cache_mutex.
struct DelayedExtent {
struct DelayedExtent
{
void *memory; // Extent pointer
size_t size; // Size of extent
int handle; // Valgrind handle of protected extent block
@ -508,7 +509,8 @@ void* MemoryPool::external_alloc(size_t &size)
# endif
}
void MemoryPool::external_free(void *blk, size_t &size, bool pool_destroying) {
void MemoryPool::external_free(void *blk, size_t &size, bool pool_destroying)
{
# if !defined(DEBUG_GDS_ALLOC) && (defined(WIN_NT) || defined(HAVE_MMAP))
if (size == EXTENT_SIZE) {
MutexLockGuard guard(*cache_mutex);
@ -539,7 +541,8 @@ void MemoryPool::external_free(void *blk, size_t &size, bool pool_destroying) {
#endif
void* MemoryPool::tree_alloc(size_t size) {
void* MemoryPool::tree_alloc(size_t size)
{
if (size == sizeof(FreeBlocksTree::ItemList))
// This condition is to handle case when nodelist and itemlist have equal size
if (sizeof(FreeBlocksTree::ItemList) != sizeof(FreeBlocksTree::NodeList) ||
@ -564,7 +567,8 @@ void* MemoryPool::tree_alloc(size_t size) {
return NULL;
}
void MemoryPool::tree_free(void* block) {
void MemoryPool::tree_free(void* block)
{
// This method doesn't merge nearby pages
((PendingFreeBlock*)block)->next = pendingFree;
ptrToBlock(block)->mbk_flags &= ~MBK_USED;
@ -586,12 +590,15 @@ void* MemoryPool::allocate_nothrow(size_t size
size = MEM_ALIGN(size);
#endif
// Blocks with internal length of zero make allocator unhappy
if (!size) size = MEM_ALIGN(1);
if (!size)
size = MEM_ALIGN(1);
if (parent_redirect) {
if (parent_redirect)
{
// We do not synchronize redirect_amount here. In the worst case we redirect slightly
// more allocations to parent than we wanted. This shouldn't cause problems
if (redirect_amount + size < REDIRECT_THRESHOLD) {
if (redirect_amount + size < REDIRECT_THRESHOLD)
{
parent->lock.enter();
// Allocate block from parent
void* result = parent->internal_alloc(size + MEM_ALIGN(sizeof(MemoryRedirectList)), 0
@ -626,64 +633,64 @@ void* MemoryPool::allocate_nothrow(size_t size
#endif
return result;
}
else {
lock.enter();
if (parent_redirect) { // It may have changed while we were taking the lock
parent_redirect = false;
// Do some hard manual work to initialize first extent
// This is the exact initial layout of memory pool in the first extent //
// MemoryExtent
// MemoryBlock
// FreeBlocksTree::ItemList
// MemoryBlock
// free space
//
// ******************************************************************* //
size_t ext_size = EXTENT_SIZE;
MemoryExtent *extent = (MemoryExtent*)external_alloc(ext_size);
fb_assert(ext_size == EXTENT_SIZE); // Make sure extent size is a multiple of page size
if (!extent) {
lock.leave();
return NULL;
}
extent->mxt_next = NULL;
extent->mxt_prev = NULL;
extents = extent;
increment_mapping(EXTENT_SIZE);
lock.enter();
if (parent_redirect)
{ // It may have changed while we were taking the lock
parent_redirect = false;
// Do some hard manual work to initialize first extent
// This is the exact initial layout of memory pool in the first extent //
// MemoryExtent
// MemoryBlock
// FreeBlocksTree::ItemList
// MemoryBlock
// free space
//
// ******************************************************************* //
size_t ext_size = EXTENT_SIZE;
MemoryExtent *extent = (MemoryExtent*)external_alloc(ext_size);
fb_assert(ext_size == EXTENT_SIZE); // Make sure extent size is a multiple of page size
MemoryBlock* hdr = (MemoryBlock*) ((char*)extent +
MEM_ALIGN(sizeof(MemoryExtent)));
hdr->mbk_pool = this;
hdr->mbk_flags = MBK_USED;
hdr->mbk_type = TYPE_LEAFPAGE;
hdr->mbk_small.mbk_length = MEM_ALIGN(sizeof(FreeBlocksTree::ItemList));
hdr->mbk_small.mbk_prev_length = 0;
spareLeafs.add((char*)hdr + MEM_ALIGN(sizeof(MemoryBlock)));
MemoryBlock* blk = (MemoryBlock *)((char*)extent +
MEM_ALIGN(sizeof(MemoryExtent)) +
MEM_ALIGN(sizeof(MemoryBlock)) +
MEM_ALIGN(sizeof(FreeBlocksTree::ItemList)));
int blockLength = EXTENT_SIZE -
MEM_ALIGN(sizeof(MemoryExtent)) -
MEM_ALIGN(sizeof(MemoryBlock)) -
MEM_ALIGN(sizeof(FreeBlocksTree::ItemList)) -
MEM_ALIGN(sizeof(MemoryBlock));
blk->mbk_flags = MBK_LAST;
blk->mbk_type = 0;
blk->mbk_small.mbk_length = blockLength;
blk->mbk_small.mbk_prev_length = hdr->mbk_small.mbk_length;
blk->mbk_prev_fragment = NULL;
FreeMemoryBlock *freeBlock = blockToPtr<FreeMemoryBlock*>(blk);
freeBlock->fbk_next_fragment = NULL;
BlockInfo temp = {blockLength, freeBlock};
freeBlocks.add(temp);
updateSpare();
if (!extent) {
lock.leave();
return NULL;
}
lock.leave();
extent->mxt_next = NULL;
extent->mxt_prev = NULL;
extents = extent;
increment_mapping(EXTENT_SIZE);
MemoryBlock* hdr = (MemoryBlock*) ((char*)extent +
MEM_ALIGN(sizeof(MemoryExtent)));
hdr->mbk_pool = this;
hdr->mbk_flags = MBK_USED;
hdr->mbk_type = TYPE_LEAFPAGE;
hdr->mbk_small.mbk_length = MEM_ALIGN(sizeof(FreeBlocksTree::ItemList));
hdr->mbk_small.mbk_prev_length = 0;
spareLeafs.add((char*)hdr + MEM_ALIGN(sizeof(MemoryBlock)));
MemoryBlock* blk = (MemoryBlock *)((char*)extent +
MEM_ALIGN(sizeof(MemoryExtent)) +
MEM_ALIGN(sizeof(MemoryBlock)) +
MEM_ALIGN(sizeof(FreeBlocksTree::ItemList)));
int blockLength = EXTENT_SIZE -
MEM_ALIGN(sizeof(MemoryExtent)) -
MEM_ALIGN(sizeof(MemoryBlock)) -
MEM_ALIGN(sizeof(FreeBlocksTree::ItemList)) -
MEM_ALIGN(sizeof(MemoryBlock));
blk->mbk_flags = MBK_LAST;
blk->mbk_type = 0;
blk->mbk_small.mbk_length = blockLength;
blk->mbk_small.mbk_prev_length = hdr->mbk_small.mbk_length;
blk->mbk_prev_fragment = NULL;
FreeMemoryBlock *freeBlock = blockToPtr<FreeMemoryBlock*>(blk);
freeBlock->fbk_next_fragment = NULL;
BlockInfo temp = {blockLength, freeBlock};
freeBlocks.add(temp);
updateSpare();
}
lock.leave();
}
lock.enter();
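
To make the first-extent layout in this hunk concrete, a back-of-the-envelope computation; every number below is an assumption for illustration, since the real values depend on EXTENT_SIZE, MEM_ALIGN and the target platform:

// All numbers are illustrative assumptions.
const size_t EXTENT  = 65536;   // EXTENT_SIZE
const size_t EXT_HDR = 16;      // MEM_ALIGN(sizeof(MemoryExtent))
const size_t BLK_HDR = 16;      // MEM_ALIGN(sizeof(MemoryBlock))
const size_t LEAF    = 128;     // MEM_ALIGN(sizeof(FreeBlocksTree::ItemList))

// Mirrors the blockLength expression above: whatever is left after the extent header,
// the leaf-page block (header + item list) and the free block's own header.
const size_t freeSpace = EXTENT - EXT_HDR - BLK_HDR - LEAF - BLK_HDR;   // 65360 with these numbers
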
@ -762,7 +769,8 @@ void* MemoryPool::allocate(size_t size
return result;
}
bool MemoryPool::verify_pool(bool fast_checks_only) {
bool MemoryPool::verify_pool(bool fast_checks_only)
{
lock.enter();
mem_assert(!pendingFree || needSpare); // needSpare flag should be set if we are in
// a critically low memory condition
@ -837,10 +845,12 @@ bool MemoryPool::verify_pool(bool fast_checks_only) {
mem_assert(blk->mbk_small.mbk_prev_length == prev_length); // Prev is correct ?
bool foundPending = false;
for (PendingFreeBlock *tmp = pendingFree; tmp; tmp = tmp->next)
{
if (tmp == (PendingFreeBlock *)((char*)blk + MEM_ALIGN(sizeof(MemoryBlock)))) {
mem_assert(!foundPending); // Block may be in pending list only one time
foundPending = true;
}
}
bool foundTree = false;
if (freeBlocks.locate(blk->mbk_small.mbk_length)) {
// Check previous fragment pointer if block is marked as unused
@ -874,10 +884,12 @@ bool MemoryPool::verify_pool(bool fast_checks_only) {
}
else {
for (FreeMemoryBlock* freeBlk = freeBlocks.current().bli_fragments; freeBlk; freeBlk = freeBlk->fbk_next_fragment)
{
if (ptrToBlock(freeBlk) == blk) {
mem_assert(!foundTree); // Block may be present in free blocks tree only once
foundTree = true;
}
}
}
}
mem_assert(!(foundTree && foundPending)); // Block shouldn't be present both in
@ -1348,7 +1360,8 @@ void* MemoryPool::internal_alloc(size_t size, SSHORT type
PendingFreeBlock *itr = pendingFree, *prev = NULL;
while (itr) {
MemoryBlock *temp = ptrToBlock(itr);
if (temp->mbk_small.mbk_length >= size) {
if (temp->mbk_small.mbk_length >= size)
{
if (temp->mbk_small.mbk_length - size < MEM_ALIGN(sizeof(MemoryBlock)) + ALLOC_ALIGNMENT)
{
// Block is small enough to be returned AS IS
@ -1365,30 +1378,30 @@ void* MemoryPool::internal_alloc(size_t size, SSHORT type
else
pendingFree = itr->next;
PATTERN_FILL(itr, size, ALLOC_PATTERN);
return itr;
}
else {
// Cut a piece at the end of block
// We don't need to modify tree of free blocks or a list of
// pending free blocks in this case
temp->mbk_small.mbk_length -= MEM_ALIGN(sizeof(MemoryBlock)) + size;
blk = next_block(temp);
blk->mbk_pool = this;
blk->mbk_flags = MBK_USED | (temp->mbk_flags & MBK_LAST);
// Cut a piece at the end of block
// We don't need to modify tree of free blocks or a list of
// pending free blocks in this case
temp->mbk_small.mbk_length -= MEM_ALIGN(sizeof(MemoryBlock)) + size;
blk = next_block(temp);
blk->mbk_pool = this;
blk->mbk_flags = MBK_USED | (temp->mbk_flags & MBK_LAST);
#ifdef DEBUG_GDS_ALLOC
blk->mbk_file = file;
blk->mbk_line = line;
blk->mbk_file = file;
blk->mbk_line = line;
#endif
temp->mbk_flags &= ~MBK_LAST;
blk->mbk_type = type;
blk->mbk_small.mbk_length = size;
blk->mbk_small.mbk_prev_length = temp->mbk_small.mbk_length;
if (!(blk->mbk_flags & MBK_LAST))
next_block(blk)->mbk_small.mbk_prev_length = blk->mbk_small.mbk_length;
void *result = blockToPtr<void*>(blk);
PATTERN_FILL(result, size, ALLOC_PATTERN);
return result;
}
temp->mbk_flags &= ~MBK_LAST;
blk->mbk_type = type;
blk->mbk_small.mbk_length = size;
blk->mbk_small.mbk_prev_length = temp->mbk_small.mbk_length;
if (!(blk->mbk_flags & MBK_LAST))
next_block(blk)->mbk_small.mbk_prev_length = blk->mbk_small.mbk_length;
void *result = blockToPtr<void*>(blk);
PATTERN_FILL(result, size, ALLOC_PATTERN);
return result;
}
prev = itr;
itr = itr->next;
@ -1760,7 +1773,8 @@ void MemoryPool::deallocate(void *block)
lock.leave();
}
MemoryPool& AutoStorage::getAutoMemoryPool() {
MemoryPool& AutoStorage::getAutoMemoryPool()
{
#ifndef SUPERCLIENT
MemoryPool* p = MemoryPool::getContextPool();
if (! p)
@ -1775,7 +1789,8 @@ MemoryPool& AutoStorage::getAutoMemoryPool() {
}
#if defined(DEV_BUILD)
void AutoStorage::ProbeStack() const {
void AutoStorage::ProbeStack() const
{
//
// AutoStorage() default constructor can be used only
// for objects on the stack. ProbeStack() uses the

View File

@ -81,7 +81,8 @@ const int MAX_TREE_DEPTH = 4;
// Alignment for all memory blocks. Sizes of memory blocks in headers are measured in this units
const size_t ALLOC_ALIGNMENT = ALIGNMENT;
static inline size_t MEM_ALIGN(size_t value) {
static inline size_t MEM_ALIGN(size_t value)
{
return FB_ALIGN(value, ALLOC_ALIGNMENT);
}
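
FB_ALIGN itself is not shown in this hunk; a typical round-up-to-multiple sketch, with sample results under the assumption that ALLOC_ALIGNMENT is 8, would be:

inline size_t align_up(size_t value, size_t alignment)   // hypothetical stand-in for FB_ALIGN
{
	return (value + alignment - 1) & ~(alignment - 1);    // alignment must be a power of two
}

// With an 8-byte ALLOC_ALIGNMENT: align_up(1, 8) == 8, align_up(13, 8) == 16, align_up(16, 8) == 16.
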
@ -92,17 +93,21 @@ const USHORT MBK_USED = 4; // Block is used
const USHORT MBK_LAST = 8; // Block is last in the extent
const USHORT MBK_DELAYED = 16; // Block is pending in the delayed free queue
struct FreeMemoryBlock {
struct FreeMemoryBlock
{
FreeMemoryBlock* fbk_next_fragment;
};
// Block header.
// Has size of 12 bytes for 32-bit targets and 16 bytes on 64-bit ones
struct MemoryBlock {
struct MemoryBlock
{
USHORT mbk_flags;
SSHORT mbk_type;
union {
struct {
union
{
struct
{
// Length and offset are measured in bytes, thus memory extent size is limited to 64k
// Larger extents are not needed now, but this may be increased later by using allocation units
USHORT mbk_length; // Actual block size: header not included, redirection list is included if applicable
@ -115,7 +120,8 @@ struct MemoryBlock {
const char* mbk_file;
int mbk_line;
#endif
union {
union
{
class MemoryPool* mbk_pool;
FreeMemoryBlock* mbk_prev_fragment;
};
@ -127,7 +133,8 @@ struct MemoryBlock {
// This structure is appended to the end of block redirected to parent pool or operating system
// It is a doubly-linked list which we are going to use when our pool is going to be deleted
struct MemoryRedirectList {
struct MemoryRedirectList
{
MemoryBlock* mrl_prev;
MemoryBlock* mrl_next;
};
@ -139,24 +146,29 @@ const SSHORT TYPE_TREEPAGE = -4;
// We store BlkInfo structures instead of BlkHeader pointers to get benefits from
// processor cache-hit optimizations
struct BlockInfo {
struct BlockInfo
{
size_t bli_length;
FreeMemoryBlock* bli_fragments;
inline static const size_t& generate(const void* sender, const BlockInfo& i) {
inline static const size_t& generate(const void* sender, const BlockInfo& i)
{
return i.bli_length;
}
};
struct MemoryExtent {
struct MemoryExtent
{
MemoryExtent *mxt_next;
MemoryExtent *mxt_prev;
};
struct PendingFreeBlock {
struct PendingFreeBlock
{
PendingFreeBlock *next;
};
class MemoryStats {
class MemoryStats
{
public:
MemoryStats() : mst_usage(0), mst_mapped(0), mst_max_usage(0), mst_max_mapped(0) {}
~MemoryStats() {}
@ -193,14 +205,18 @@ private:
// MemoryPool inheritance looks weird because we cannot use
// any pointers to functions in shared memory. VMT usage in
// MemoryPool and its descendants is prohibited
class MemoryPool {
class MemoryPool
{
private:
class InternalAllocator {
class InternalAllocator
{
public:
void* allocate(size_t size) {
void* allocate(size_t size)
{
return ((MemoryPool*)this)->tree_alloc(size);
}
void deallocate(void* block) {
void deallocate(void* block)
{
((MemoryPool*)this)->tree_free(block);
}
};
@ -291,8 +307,7 @@ protected:
MemoryPool(MemoryPool* _parent, MemoryStats &_stats, void* first_extent, void* root_page);
// This should never be called
~MemoryPool() {
}
~MemoryPool() {}
public:
// Default statistics group for process
@ -352,7 +367,8 @@ public:
const char* filter_path = 0);
// Deallocate memory block. Pool is derived from block header
static void globalFree(void* block) {
static void globalFree(void* block)
{
if (block)
((MemoryBlock*)((char*)block - MEM_ALIGN(sizeof(MemoryBlock))))->mbk_pool->deallocate(block);
}
@ -387,12 +403,15 @@ public:
// Class intended to manage execution context pool stack
// Declare an instance of this class when you need to set a new context pool; it
// will be restored automatically as soon as the holder variable goes out of scope
class ContextPoolHolder {
class ContextPoolHolder
{
public:
ContextPoolHolder(MemoryPool* newPool) {
ContextPoolHolder(MemoryPool* newPool)
{
savedPool = MemoryPool::setContextPool(newPool);
}
~ContextPoolHolder() {
~ContextPoolHolder()
{
MemoryPool::setContextPool(savedPool);
}
private:
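
A hedged usage sketch of the holder declared above (the function and pool names are made up; only ContextPoolHolder and MemoryPool come from this header):

void workInScratchPool(Firebird::MemoryPool* scratch)
{
	Firebird::ContextPoolHolder context(scratch);   // saves the old context pool, installs scratch
	// ... code that allocates via the current context pool runs against scratch here ...
}   // destructor restores the previous context pool, even if an exception unwinds the stack
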
@ -417,7 +436,8 @@ public:
{
savedThreadData->setDefaultPool(newPool);
}
~SubsystemContextPoolHolder() {
~SubsystemContextPoolHolder()
{
savedThreadData->setDefaultPool(savedPool);
}
private:
@ -468,19 +488,23 @@ inline void operator delete[](void* mem) throw()
}
#ifdef DEBUG_GDS_ALLOC
inline void* operator new(size_t s, Firebird::MemoryPool& pool, const char* file, int line) {
inline void* operator new(size_t s, Firebird::MemoryPool& pool, const char* file, int line)
{
return pool.allocate(s, file, line);
}
inline void* operator new[](size_t s, Firebird::MemoryPool& pool, const char* file, int line) {
inline void* operator new[](size_t s, Firebird::MemoryPool& pool, const char* file, int line)
{
return pool.allocate(s, file, line);
}
#define FB_NEW(pool) new(pool, __FILE__, __LINE__)
#define FB_NEW_RPT(pool, count) new(pool, count, __FILE__, __LINE__)
#else
inline void* operator new(size_t s, Firebird::MemoryPool& pool) {
inline void* operator new(size_t s, Firebird::MemoryPool& pool)
{
return pool.allocate(s);
}
inline void* operator new[](size_t s, Firebird::MemoryPool& pool) {
inline void* operator new[](size_t s, Firebird::MemoryPool& pool)
{
return pool.allocate(s);
}
#define FB_NEW(pool) new(pool)
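
In either build mode the macro is used the same way at call sites; a sketch (MyNode is a made-up class):

class MyNode
{
public:
	explicit MyNode(int v) : value(v) {}
	int value;
};

MyNode* makeNode(Firebird::MemoryPool& pool)
{
	return FB_NEW(pool) MyNode(42);   // placement new that draws memory from the given pool
}
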
@ -492,7 +516,8 @@ namespace Firebird
{
// Global storage makes it possible to use new and delete for classes,
// based on it, to behave traditionally, i.e. get memory from permanent pool.
class GlobalStorage {
class GlobalStorage
{
public:
void* operator new(size_t size)
{
@ -516,7 +541,8 @@ namespace Firebird
// constructors of this objects. Permanent means that pool,
// which will be later used for such allocations, must
// be explicitly passed in all constructors of such object.
class PermanentStorage {
class PermanentStorage
{
private:
MemoryPool& pool;
protected:
@ -529,7 +555,8 @@ namespace Firebird
// parameter. In this case AutoStorage sends AutoMemoryPool
// to PermanentStorage. To ensure this operation to be safe
// such trick possible only for local (on stack) variables.
class AutoStorage : public PermanentStorage {
class AutoStorage : public PermanentStorage
{
private:
#if defined(DEV_BUILD)
void ProbeStack() const;
@ -537,7 +564,8 @@ namespace Firebird
public:
static MemoryPool& getAutoMemoryPool();
protected:
AutoStorage() : PermanentStorage(getAutoMemoryPool()) {
AutoStorage() : PermanentStorage(getAutoMemoryPool())
{
#if defined(DEV_BUILD)
ProbeStack();
#endif
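
To illustrate the contract described in the comments above, a sketch of a class built on AutoStorage (TempBuffer is invented; the pool-taking constructor is the same one InlineStorage forwards to in the next file):

class TempBuffer : public Firebird::AutoStorage
{
public:
	TempBuffer()
		: AutoStorage()                  // stack instances only; ProbeStack() asserts this in DEV_BUILD
	{ }
	explicit TempBuffer(Firebird::MemoryPool& p)
		: AutoStorage(p)                 // explicit pool for members and heap-allocated objects
	{ }
};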

View File

@ -35,15 +35,18 @@ namespace Firebird {
// Static part of the array
template <typename T, size_t Capacity>
class InlineStorage : public AutoStorage {
class InlineStorage : public AutoStorage
{
public:
explicit InlineStorage(MemoryPool& p) : AutoStorage(p) { }
InlineStorage() : AutoStorage() { }
protected:
T* getStorage() {
T* getStorage()
{
return buffer;
}
size_t getStorageSize() const {
size_t getStorageSize() const
{
return Capacity;
}
private:
@ -52,7 +55,8 @@ private:
// Used when array doesn't have static part
template <typename T>
class EmptyStorage : public AutoStorage {
class EmptyStorage : public AutoStorage
{
public:
explicit EmptyStorage(MemoryPool& p) : AutoStorage(p) { }
EmptyStorage() : AutoStorage() { }
@ -63,7 +67,8 @@ protected:
// Dynamic array of simple types
template <typename T, typename Storage = EmptyStorage<T> >
class Array : protected Storage {
class Array : protected Storage
{
public:
explicit Array(MemoryPool& p) :
Storage(p), count(0), capacity(this->getStorageSize()), data(this->getStorage()) { }
@ -85,11 +90,13 @@ public:
}
void clear() { count = 0; }
protected:
const T& getElement(size_t index) const {
const T& getElement(size_t index) const
{
fb_assert(index < count);
return data[index];
}
T& getElement(size_t index) {
T& getElement(size_t index)
{
fb_assert(index < count);
return data[index];
}
@ -108,96 +115,113 @@ public:
count = L.count;
return *this;
}
const T& operator[](size_t index) const {
const T& operator[](size_t index) const
{
return getElement(index);
}
T& operator[](size_t index) {
T& operator[](size_t index)
{
return getElement(index);
}
const T& front() const {
const T& front() const
{
fb_assert(count > 0);
return *data;
}
const T& back() const {
const T& back() const
{
fb_assert(count > 0);
return *(data + count - 1);
}
const T* begin() const { return data; }
const T* end() const { return data + count; }
T& front() {
T& front()
{
fb_assert(count > 0);
return *data;
}
T& back() {
T& back()
{
fb_assert(count > 0);
return *(data + count - 1);
}
T* begin() { return data; }
T* end() { return data + count; }
void insert(size_t index, const T& item) {
void insert(size_t index, const T& item)
{
fb_assert(index <= count);
ensureCapacity(count + 1);
memmove(data + index + 1, data + index, sizeof(T) * (count++ - index));
data[index] = item;
}
void insert(size_t index, const Array<T, Storage>& L) {
void insert(size_t index, const Array<T, Storage>& L)
{
fb_assert(index <= count);
ensureCapacity(count + L.count);
memmove(data + index + L.count, data + index, sizeof(T) * (count - index));
memcpy(data + index, L.data, L.count);
count += L.count;
}
void insert(size_t index, const T* items, size_t itemsSize) {
void insert(size_t index, const T* items, size_t itemsSize)
{
fb_assert(index <= count);
ensureCapacity(count + itemsSize);
memmove(data + index + itemsSize, data + index, sizeof(T) * (count - index));
memcpy(data + index, items, sizeof(T) * itemsSize);
count += itemsSize;
}
size_t add(const T& item) {
size_t add(const T& item)
{
ensureCapacity(count + 1);
data[count] = item;
return ++count;
}
// NOTE: remove method must be signal safe
// This function may be called in AST. The function doesn't wait.
T* remove(size_t index) {
T* remove(size_t index)
{
fb_assert(index < count);
memmove(data + index, data + index + 1, sizeof(T) * (--count - index));
return &data[index];
}
T* removeRange(size_t from, size_t to) {
T* removeRange(size_t from, size_t to)
{
fb_assert(from <= to);
fb_assert(to <= count);
memmove(data + from, data + to, sizeof(T) * (count - to));
count -= (to - from);
return &data[from];
}
T* removeCount(size_t index, size_t n) {
T* removeCount(size_t index, size_t n)
{
fb_assert(index + n <= count);
memmove(data + index, data + index + n, sizeof(T) * (count - index - n));
count -= n;
return &data[index];
}
T* remove(T* itr) {
T* remove(T* itr)
{
const size_t index = itr - begin();
fb_assert(index < count);
memmove(data + index, data + index + 1, sizeof(T) * (--count - index));
return &data[index];
}
void shrink(size_t newCount) {
void shrink(size_t newCount)
{
fb_assert(newCount <= count);
count = newCount;
}
// Grow size of our array and zero-initialize new items
void grow(size_t newCount) {
void grow(size_t newCount)
{
fb_assert(newCount >= count);
ensureCapacity(newCount);
memset(data + count, 0, sizeof(T) * (newCount - count));
count = newCount;
}
// Resize array according to STL's vector::resize() rules
void resize(size_t newCount, const T& val) {
void resize(size_t newCount, const T& val)
{
if (newCount > count) {
ensureCapacity(newCount);
while (count < newCount) {
@ -209,7 +233,8 @@ public:
}
}
// Resize array according to STL's vector::resize() rules
void resize(size_t newCount) {
void resize(size_t newCount)
{
if (newCount > count) {
grow(newCount);
}
@ -217,7 +242,8 @@ public:
count = newCount;
}
}
void join(const Array<T, Storage>& L) {
void join(const Array<T, Storage>& L)
{
ensureCapacity(count + L.count);
memcpy(data + count, L.data, sizeof(T) * L.count);
count += L.count;
@ -226,21 +252,25 @@ public:
// Used as such in GlobalRWLock::blockingAstHandler
size_t getCount() const { return count; }
size_t getCapacity() const { return capacity; }
void push(const T& item) {
void push(const T& item)
{
add(item);
}
void push(const T* items, size_t itemsSize) {
void push(const T* items, size_t itemsSize)
{
ensureCapacity(count + itemsSize);
memcpy(data + count, items, sizeof(T) * itemsSize);
count += itemsSize;
}
T pop() {
T pop()
{
fb_assert(count > 0);
count--;
return data[count];
}
// prepare array to be used as a buffer of capacity items
T* getBuffer(size_t capacityL) {
T* getBuffer(size_t capacityL)
{
ensureCapacity(capacityL);
count = capacityL;
return data;
@ -254,7 +284,8 @@ public:
data = this->getStorage();
}
bool find(const T& item, size_t& pos) const {
bool find(const T& item, size_t& pos) const
{
for (size_t i = 0; i < count; i++) {
if (data[i] == item) {
pos = i;
@ -264,7 +295,8 @@ public:
return false;
}
bool exist(const T& item) const {
bool exist(const T& item) const
{
size_t pos; // ignored
return find(item, pos);
}
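
A quick usage sketch for find()/exist() above (element type and values are illustrative; the array is built with an explicit pool as the class requires):

void collectNumbers(Firebird::MemoryPool& pool)
{
	Firebird::Array<int> numbers(pool);
	numbers.add(7);
	numbers.add(42);

	size_t pos;
	if (numbers.find(42, pos))        // linear scan; pos becomes 1 here
		numbers.remove(pos);

	fb_assert(!numbers.exist(42));    // the element is gone after remove()
}
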
@ -272,7 +304,8 @@ public:
protected:
size_t count, capacity;
T* data;
void ensureCapacity(size_t newcapacity) {
void ensureCapacity(size_t newcapacity)
{
if (newcapacity > capacity) {
if (newcapacity < capacity * 2) {
newcapacity = capacity * 2;
@ -297,14 +330,16 @@ template <typename Value,
typename Key = Value,
typename KeyOfValue = DefaultKeyValue<Value>,
typename Cmp = DefaultComparator<Key> >
class SortedArray : public Array<Value, Storage> {
class SortedArray : public Array<Value, Storage>
{
public:
SortedArray(MemoryPool& p, size_t s) : Array<Value, Storage>(p, s) {}
explicit SortedArray(MemoryPool& p) : Array<Value, Storage>(p) {}
explicit SortedArray(size_t s) : Array<Value, Storage>(s) {}
SortedArray() : Array<Value, Storage>() {}
bool find(const Key& item, size_t& pos) const {
bool find(const Key& item, size_t& pos) const
{
size_t highBound = this->count, lowBound = 0;
while (highBound > lowBound) {
const size_t temp = (highBound + lowBound) >> 1;
@ -324,7 +359,8 @@ public:
return find(item, pos);
}
size_t add(const Value& item) {
size_t add(const Value& item)
{
size_t pos;
find(KeyOfValue::generate(this, item), pos);
insert(pos, item);
@ -334,7 +370,8 @@ public:
// Nice shorthand for arrays with static part
template <typename T, size_t InlineCapacity>
class HalfStaticArray : public Array<T, InlineStorage<T, InlineCapacity> > {
class HalfStaticArray : public Array<T, InlineStorage<T, InlineCapacity> >
{
public:
explicit HalfStaticArray(MemoryPool& p) : Array<T, InlineStorage<T, InlineCapacity> > (p) {}
HalfStaticArray(MemoryPool& p, size_t InitialCapacity) :
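
A short sketch contrasting the two specializations above (the pool reference and the values are assumptions):

void bufferDemo(Firebird::MemoryPool& pool)
{
	// The first 16 items live in the inline buffer, so small cases never touch the pool.
	Firebird::HalfStaticArray<UCHAR, 16> buf(pool);
	buf.add(0x2A);
	UCHAR* raw = buf.getBuffer(64);   // switches to pool storage once the inline part is exceeded

	// SortedArray::add() binary-searches for the insertion point, keeping items ordered.
	Firebird::SortedArray<int> sorted(pool);
	sorted.add(3);
	sorted.add(1);                    // array now holds {1, 3}
	size_t pos;
	const bool found = sorted.find(3, pos);   // true, with pos == 1
}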

View File

@ -54,7 +54,8 @@ public:
};
template <typename Where, typename Clear = SimpleDelete<Where> >
class AutoPtr {
class AutoPtr
{
private:
Where* ptr;
public:
@ -62,35 +63,42 @@ public:
: ptr(v)
{}
~AutoPtr() {
~AutoPtr()
{
Clear::clear(ptr);
}
AutoPtr<Where, Clear>& operator= (Where* v) {
AutoPtr<Where, Clear>& operator= (Where* v)
{
Clear::clear(ptr);
ptr = v;
return *this;
}
operator Where*() {
operator Where*()
{
return ptr;
}
bool operator !() const {
bool operator !() const
{
return !ptr;
}
Where* operator->() {
Where* operator->()
{
return ptr;
}
Where* release() {
Where* release()
{
Where* tmp = ptr;
ptr = NULL;
return tmp;
}
void reset(Where* v = NULL) {
void reset(Where* v = NULL)
{
if (v != ptr) {
Clear::clear(ptr);
ptr = v;
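
A hedged usage sketch of the smart pointer above (Report and its run() method are invented; FB_NEW comes from the allocator header changed earlier in this commit):

void produceReport(Firebird::MemoryPool& pool)
{
	Firebird::AutoPtr<Report> report(FB_NEW(pool) Report());
	if (!report)                        // operator! tests the held pointer for NULL
		return;

	report->run();                      // operator-> forwards to the raw pointer
	// Either let the destructor run Clear::clear() on it, or hand ownership away:
	// Report* keep = report.release();
}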

View File

@ -46,24 +46,29 @@
namespace Firebird {
template <typename T>
class Win32Tls {
class Win32Tls
{
public:
Win32Tls() {
Win32Tls()
{
if ((key = TlsAlloc()) == 0xFFFFFFFF)
system_call_failed::raise("TlsAlloc");
}
const T get() {
const T get()
{
LPVOID value = TlsGetValue(key);
if ((value == NULL) && (GetLastError() != NO_ERROR))
system_call_failed::raise("TlsGetValue");
// return reinterpret_cast<T>(value);
return (T)value;
}
void set(const T value) {
void set(const T value)
{
if (TlsSetValue(key, (LPVOID)value) == 0)
system_call_failed::raise("TlsSetValue");
}
~Win32Tls() {
~Win32Tls()
{
if (TlsFree(key) == 0)
system_call_failed::raise("TlsFree");
}
@ -96,22 +101,27 @@ namespace Firebird {
template <typename T>
class TlsValue {
class TlsValue
{
public:
TlsValue() {
TlsValue()
{
if (pthread_key_create(&key, NULL))
system_call_failed::raise("pthread_key_create");
}
const T get() {
const T get()
{
// We use double C-style cast to allow using scalar datatypes
// with sizes up to size of pointer without warnings
return (T)(IPTR)pthread_getspecific(key);
}
void set(const T value) {
void set(const T value)
{
if (pthread_setspecific(key, (void*)(IPTR)value))
system_call_failed::raise("pthread_setspecific");
}
~TlsValue() {
~TlsValue()
{
if (pthread_key_delete(key))
system_call_failed::raise("pthread_key_delete");
}
@ -126,9 +136,11 @@ namespace Firebird {
template <typename T>
class TlsValue {
class TlsValue
{
public:
static void TlsV_on_thread_exit (void * pval) {
static void TlsV_on_thread_exit (void * pval)
{
/* Usually should delete pval like this
T * ptempT= (T*) pval ;
delete ptempT;
@ -136,25 +148,29 @@ public:
}
TlsValue() {
TlsValue()
{
if (thr_keycreate(&key, TlsV_on_thread_exit) )
system_call_failed::raise("thr_key_create");
}
const T get() {
const T get()
{
// We use double C-style cast to allow using scalar datatypes
// with sizes up to size of pointer without warnings
T * valuep;
T* valuep;
if (thr_getspecific(key, (void **) &valuep) == 0)
return (T)(IPTR) (valuep) ;
else
system_call_failed::raise("thr_getspecific");
return (T)NULL;
return (T)(IPTR) valuep ;
system_call_failed::raise("thr_getspecific");
return (T)NULL;
}
void set(const T value) {
void set(const T value)
{
if (thr_setspecific(key, (void*)(IPTR)value))
system_call_failed::raise("thr_setspecific");
}
~TlsValue() {
~TlsValue()
{
/* Do nothing if no pthread_key_delete */
}
private:
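
All three platform implementations expose the same get()/set() surface; a usage sketch (the variable and function names are invented, and the stored type must fit in a pointer, as the comments above require):

static Firebird::TlsValue<int> perThreadDepth;

void enterRecursiveSection()
{
	perThreadDepth.set(perThreadDepth.get() + 1);   // each thread updates only its own slot
}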

View File

@ -58,32 +58,38 @@ namespace Firebird
pos = 0;
}
*/
iterator& operator++() {
iterator& operator++()
{
++pos;
return (*this);
}
iterator operator++(int) {
iterator operator++(int)
{
iterator tmp = *this;
++pos;
return tmp;
}
iterator& operator--() {
iterator& operator--()
{
fb_assert(pos > 0);
--pos;
return (*this);
}
iterator operator--(int) {
iterator operator--(int)
{
fb_assert(pos > 0);
iterator tmp = *this;
--pos;
return tmp;
}
T* operator->() {
T* operator->()
{
fb_assert(lst);
T* pointer = lst->getPointer(pos);
return pointer;
}
T& operator*() {
T& operator*()
{
fb_assert(lst);
T* pointer = lst->getPointer(pos);
return *pointer;
@ -118,32 +124,38 @@ namespace Firebird
pos = 0;
}
*/
const_iterator& operator++() {
const_iterator& operator++()
{
++pos;
return (*this);
}
const_iterator operator++(int) {
const_iterator operator++(int)
{
const_iterator tmp = *this;
++pos;
return tmp;
}
const_iterator& operator--() {
const_iterator& operator--()
{
fb_assert(pos > 0);
--pos;
return (*this);
}
const_iterator operator--(int) {
const_iterator operator--(int)
{
fb_assert(pos > 0);
const_iterator tmp = *this;
--pos;
return tmp;
}
const T* operator->() {
const T* operator->()
{
fb_assert(lst);
const T* pointer = lst->getPointer(pos);
return pointer;
}
const T& operator*() {
const T& operator*()
{
fb_assert(lst);
const T* pointer = lst->getPointer(pos);
return *pointer;
@ -173,81 +185,100 @@ namespace Firebird
};
public:
void insert(size_t index, const T& item) {
void insert(size_t index, const T& item)
{
T* dataL = FB_NEW(this->getPool()) T(this->getPool(), item);
inherited::insert(index, dataL);
}
size_t add(const T& item) {
size_t add(const T& item)
{
T* dataL = FB_NEW(this->getPool()) T(this->getPool(), item);
return inherited::add(dataL);
}
T& add() {
T& add()
{
T* dataL = FB_NEW(this->getPool()) T(this->getPool());
inherited::add(dataL);
return *dataL;
}
void push(const T& item) {
void push(const T& item)
{
add(item);
}
T pop() {
T pop()
{
T* pntr = inherited::pop();
T rc = *pntr;
delete pntr;
return rc;
}
void remove(size_t index) {
void remove(size_t index)
{
fb_assert(index < getCount());
delete getPointer(index);
inherited::remove(index);
}
void remove(iterator itr) {
void remove(iterator itr)
{
fb_assert(itr.lst == this);
remove(itr.pos);
}
void shrink(size_t newCount) {
void shrink(size_t newCount)
{
for (size_t i = newCount; i < getCount(); i++) {
delete getPointer(i);
}
inherited::shrink(newCount);
}
iterator begin() {
iterator begin()
{
return iterator(this, 0);
}
iterator end() {
iterator end()
{
return iterator(this, getCount());
}
iterator back() {
iterator back()
{
fb_assert(getCount() > 0);
return iterator(this, getCount() - 1);
}
const_iterator begin() const {
const_iterator begin() const
{
return const_iterator(this, 0);
}
const_iterator end() const {
const_iterator end() const
{
return const_iterator(this, getCount());
}
const T& operator[](size_t index) const {
const T& operator[](size_t index) const
{
return *getPointer(index);
}
const T* getPointer(size_t index) const {
const T* getPointer(size_t index) const
{
return inherited::getElement(index);
}
T& operator[](size_t index) {
T& operator[](size_t index)
{
return *getPointer(index);
}
T* getPointer(size_t index) {
T* getPointer(size_t index)
{
return inherited::getElement(index);
}
explicit ObjectsArray(MemoryPool& p) : A(p) { }
ObjectsArray() : A() { }
~ObjectsArray() {
~ObjectsArray()
{
for (size_t i = 0; i < getCount(); i++) {
delete getPointer(i);
}
}
size_t getCount() const {return inherited::getCount();}
size_t getCapacity() const {return inherited::getCapacity();}
void clear() {
void clear()
{
for (size_t i = 0; i < getCount(); i++) {
delete getPointer(i);
}
@ -276,16 +307,19 @@ namespace Firebird
// Template to convert object value to index directly
template <typename T>
class ObjectKeyValue {
class ObjectKeyValue
{
public:
static const T& generate(const void* sender, const T* Item) { return Item; }
};
// Template for default value comparator
template <typename T>
class ObjectComparator {
class ObjectComparator
{
public:
static bool greaterThan(const T i1, const T i2) {
static bool greaterThan(const T i1, const T i2)
{
return *i1 > *i2;
}
};
@ -322,7 +356,8 @@ namespace Firebird
size_t pos;
return find(item, pos);
}
size_t add(const ObjectValue& item) {
size_t add(const ObjectValue& item)
{
return inherited::add(item);
}

View File

@ -34,7 +34,8 @@
namespace Firebird {
struct BitmapTypes_32 {
struct BitmapTypes_32
{
typedef ULONG BUNCH_T;
enum {
LOG2_BUNCH_BITS = 7,
@ -42,7 +43,8 @@ struct BitmapTypes_32 {
};
};
struct BitmapTypes_64 {
struct BitmapTypes_64
{
typedef FB_UINT64 BUNCH_T;
enum {
LOG2_BUNCH_BITS = 8,
@ -53,7 +55,8 @@ struct BitmapTypes_64 {
#define BUNCH_ONE ((BUNCH_T)1)
template <typename T, typename InternalTypes = BitmapTypes_64>
class SparseBitmap : public AutoStorage {
class SparseBitmap : public AutoStorage
{
public:
// Default constructor, stack placement
SparseBitmap() :
@ -66,34 +69,23 @@ public:
{ }
// Default accessor methods
bool locate(T key) {
return defaultAccessor.locate(locEqual, key);
}
bool locate(T key) { return defaultAccessor.locate(locEqual, key); }
bool locate(LocType lt, T key) {
return defaultAccessor.locate(lt, key);
}
bool locate(LocType lt, T key) { return defaultAccessor.locate(lt, key); }
bool getFirst() {
return defaultAccessor.getFirst();
}
bool getFirst() { return defaultAccessor.getFirst(); }
bool getLast() {
return defaultAccessor.getLast();
}
bool getLast() { return defaultAccessor.getLast(); }
bool getNext() {
return defaultAccessor.getNext();
}
bool getNext() { return defaultAccessor.getNext(); }
bool getPrev() {
return defaultAccessor.getPrev();
}
bool getPrev() { return defaultAccessor.getPrev(); }
T current() const { return defaultAccessor.current(); }
// Set bit
void set(T value) {
void set(T value)
{
if (singular) {
// If we are trying to set the same bit as already set - do nothing
if (singular_value == value)
@ -130,7 +122,8 @@ public:
}
}
bool clear(T value) {
bool clear(T value)
{
if (singular) {
fb_assert(tree.isEmpty());
@ -155,7 +148,8 @@ public:
return false;
}
bool test(T value) {
bool test(T value)
{
if (singular) {
fb_assert(tree.isEmpty());
return (value == singular_value);
@ -169,24 +163,28 @@ public:
return false;
}
static bool test(SparseBitmap* bitmap, T value) {
static bool test(SparseBitmap* bitmap, T value)
{
if (!bitmap)
return false;
return bitmap->test(value);
}
// Clear bitmap if it is not NULL
static void reset(SparseBitmap* bitmap) {
static void reset(SparseBitmap* bitmap)
{
if (bitmap)
bitmap->clear();
}
size_t approxSize() const {
size_t approxSize() const
{
return sizeof(*this) + tree.approxSize();
}
// Make bitmap empty
void clear() {
void clear()
{
singular = false;
tree.clear();
}
@ -208,10 +206,12 @@ protected:
};
// Bucket with bits
struct Bucket {
struct Bucket
{
T start_value; // starting value, BUNCH_BITS-aligned
BUNCH_T bits; // bits data
inline static const T& generate(const void* sender, const Bucket& i) {
inline static const T& generate(const void* sender, const Bucket& i)
{
return i.start_value;
}
};
@ -230,18 +230,21 @@ private:
SparseBitmap& operator =(const SparseBitmap& from); // Assignment operator. Not implemented for now.
public:
class Accessor {
class Accessor
{
public:
Accessor(SparseBitmap* _bitmap) :
bitmap(_bitmap), treeAccessor(_bitmap ? &_bitmap->tree : NULL), bit_mask(BUNCH_ONE), current_value(0) {}
bool locate(T key) {
bool locate(T key)
{
return locate(locEqual, key);
}
// Position accessor on item having LocType relationship with given key
// If method returns false position of accessor is not defined.
bool locate(LocType lt, T key) {
bool locate(LocType lt, T key)
{
// Small convenience related to fact engine likes to use NULL SparseBitmap pointers
if (!bitmap)
return false;
@ -297,7 +300,8 @@ public:
current_value = key;
bit_mask = BUNCH_ONE << (key - key_aligned);
return treeAccessor.current().bits & bit_mask;
case locGreatEqual: {
case locGreatEqual:
{
// Initialize bit_mask
if (treeAccessor.current().start_value == key_aligned) {
current_value = key;
@ -335,7 +339,8 @@ public:
// Bucket must contain one bit at least
fb_assert(false);
}
case locLessEqual: {
case locLessEqual:
{
// Initialize bit_mask
if (treeAccessor.current().start_value == key_aligned) {
current_value = key;
@ -380,7 +385,8 @@ public:
// If method returns false it means list is empty and
// position of accessor is not defined.
bool getFirst() {
bool getFirst()
{
// Small convenience related to fact engine likes to use NULL SparseBitmap pointers
if (!bitmap)
return false;
@ -409,7 +415,8 @@ public:
// If method returns false it means list is empty and
// position of accessor is not defined.
bool getLast() {
bool getLast()
{
// Small convenience related to fact engine likes to use NULL SparseBitmap pointers
if (!bitmap)
return false;
@ -438,7 +445,8 @@ public:
// Accessor position must be established via successful call to getFirst(),
// getLast() or locate() before you can call this method
bool getNext() {
bool getNext()
{
if (bitmap->singular)
return false;
@ -487,7 +495,8 @@ public:
// Accessor position must be established via successful call to getFirst(),
// getLast() or locate() before you can call this method
bool getPrev() {
bool getPrev()
{
if (bitmap->singular)
return false;
@ -681,16 +690,16 @@ SparseBitmap<T, InternalTypes>::bit_and(
if (map1->singular) {
if (map2->test(map1->singular_value))
return bitmap1;
else
return NULL;
return NULL;
}
// Second bitmap is singular. Test appropriate bit in first and return second
if (map2->singular) {
if (map1->test(map2->singular_value))
return bitmap2;
else
return NULL;
return NULL;
}
SparseBitmap *source, *dest, **result;
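
A usage sketch for the bitmap above (the key values and the visit() callback are illustrative; SparseBitmap derives from AutoStorage, so a stack instance can use the default constructor):

void walkPages()
{
	Firebird::SparseBitmap<ULONG> pages;   // stack instance; AutoStorage picks the pool
	pages.set(10);
	pages.set(12);
	pages.clear(12);

	if (pages.test(10))                    // true
	{
		// Walk the remaining set bits in ascending order via the default accessor.
		for (bool ok = pages.getFirst(); ok; ok = pages.getNext())
			visit(pages.current());        // visit() is a made-up callback
	}
}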

View File

@ -224,8 +224,8 @@ namespace Firebird {
if (stk && stk->next)
return stk->next->hasMore(value);
else
return false;
return false;
}
bool hasData() const
@ -256,8 +256,8 @@ namespace Firebird {
bool operator== (const Stack<Object, Capacity>& s) const
{
return (this->stk == s.stk) &&
(s.stk ? this->elem == s.stk->getCount() : true);
return (this->stk == s.stk) &&
(s.stk ? this->elem == s.stk->getCount() : true);
}
bool operator!= (const Stack<Object, Capacity>& s) const
@ -327,8 +327,8 @@ namespace Firebird {
if (stk && stk->next)
return stk->next->hasMore(value);
else
return false;
return false;
}
bool hasData() const

View File

@ -270,12 +270,14 @@ void TimeStamp::round_time(ISC_TIME &ntime, int precision)
}
// Encode timestamp from UNIX datetime structure
void TimeStamp::encode(const struct tm* times, int fractions) {
void TimeStamp::encode(const struct tm* times, int fractions)
{
mValue = encode_timestamp(times, fractions);
}
// Decode timestamp into UNIX datetime structure
void TimeStamp::decode(struct tm* times, int* fractions) const {
void TimeStamp::decode(struct tm* times, int* fractions) const
{
decode_timestamp(mValue, times, fractions);
}
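
A sketch of the round trip these two methods provide (how the TimeStamp instance is obtained is not shown in this hunk, so the default construction below is an assumption):

void roundTrip()
{
	struct tm times;
	memset(&times, 0, sizeof(times));      // needs <cstring>
	times.tm_year = 108;                   // years since 1900 -> 2008
	times.tm_mon = 3;                      // 0-based month -> April
	times.tm_mday = 19;

	Firebird::TimeStamp ts;                // assumed default construction
	ts.encode(&times, 0);                  // fractions of a second = 0

	int fractions;
	ts.decode(&times, &fractions);         // back to the broken-down form
}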

View File

@ -37,15 +37,18 @@ namespace Firebird {
// Very fast static array of simple types
template <typename T, size_t Capacity>
class Vector {
class Vector
{
public:
Vector() : count(0) {}
T& operator[](size_t index) {
T& operator[](size_t index)
{
fb_assert(index < count);
return data[index];
}
const T& operator[](size_t index) const {
const T& operator[](size_t index) const
{
fb_assert(index < count);
return data[index];
}
@ -57,34 +60,40 @@ public:
size_t getCapacity() const { return Capacity; }
void clear() { count = 0; }
void insert(size_t index, const T& item) {
void insert(size_t index, const T& item)
{
fb_assert(index <= count);
fb_assert(count < Capacity);
memmove(data + index + 1, data + index, sizeof(T) * (count++ - index));
data[index] = item;
}
size_t add(const T& item) {
size_t add(const T& item)
{
fb_assert(count < Capacity);
data[count] = item;
return ++count;
}
T* remove(size_t index) {
T* remove(size_t index)
{
fb_assert(index < count);
memmove(data + index, data + index + 1, sizeof(T) * (--count - index));
return &data[index];
}
void shrink(size_t newCount) {
void shrink(size_t newCount)
{
fb_assert(newCount <= count);
count = newCount;
}
void join(const Vector<T, Capacity>& L) {
void join(const Vector<T, Capacity>& L)
{
fb_assert(count + L.count <= Capacity);
memcpy(data + count, L.data, sizeof(T) * L.count);
count += L.count;
}
// prepare vector to be used as a buffer of capacity items
T* getBuffer(size_t capacityL) {
T* getBuffer(size_t capacityL)
{
fb_assert(capacityL <= Capacity);
count = capacityL;
return data;
@ -97,16 +106,19 @@ protected:
// Template for default value comparison
template <typename T>
class DefaultComparator {
class DefaultComparator
{
public:
static bool greaterThan(const T& i1, const T& i2) {
static bool greaterThan(const T& i1, const T& i2)
{
return i1 > i2;
}
};
// Template to convert value to index directly
template <typename T>
class DefaultKeyValue {
class DefaultKeyValue
{
public:
static const T& generate(const void* sender, const T& Item) { return Item; }
};
@ -116,10 +128,12 @@ public:
template <typename Value, size_t Capacity, typename Key = Value,
typename KeyOfValue = DefaultKeyValue<Value>,
typename Cmp = DefaultComparator<Key> >
class SortedVector : public Vector<Value, Capacity> {
class SortedVector : public Vector<Value, Capacity>
{
public:
SortedVector() : Vector<Value, Capacity>() {}
bool find(const Key& item, size_t& pos) const {
bool find(const Key& item, size_t& pos) const
{
size_t highBound = this->count, lowBound = 0;
while (highBound > lowBound) {
const size_t temp = (highBound + lowBound) >> 1;
@ -132,7 +146,8 @@ public:
return highBound != this->count &&
!Cmp::greaterThan(KeyOfValue::generate(this, this->data[lowBound]), item);
}
size_t add(const Value& item) {
size_t add(const Value& item)
{
size_t pos;
find(KeyOfValue::generate(this, item), pos);
insert(pos, item);
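
A short usage sketch for the two templates above (values are illustrative):

void vectorDemo()
{
	Firebird::Vector<int, 8> v;            // capacity fixed at compile time; no pool, no heap
	v.add(7);
	v.add(3);                              // fb_assert fires if more than 8 items are added

	Firebird::SortedVector<int, 8> sv;     // add() binary-searches for the insertion point
	sv.add(7);
	sv.add(3);                             // vector now holds {3, 7}
	size_t pos;
	const bool found = sv.find(7, pos);    // true, with pos == 1
}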

View File

@ -70,7 +70,8 @@ public:
};
class DirectoryList : public ObjectsArray<ParsedPath> {
class DirectoryList : public ObjectsArray<ParsedPath>
{
private:
typedef ObjectsArray<ParsedPath> inherited;
// ListMode must be changed together with ListKeys in dir_list.cpp
@ -85,7 +86,8 @@ private:
PathName key, PathName next);
protected:
// Clear allocated memory and reinitialize
void clear(void) {
void clear(void)
{
((inherited*)this)->clear();
mode = NotInitialized;
}
@ -110,13 +112,11 @@ public:
// Search for file Name in all directories of DirectoryList.
// If found, return full path to it in Path.
// Otherwise Path = Name.
bool expandFileName(PathName& path,
const PathName& name) const;
bool expandFileName(PathName& path, const PathName& name) const;
// Use first directory in this directory list
// to build default full name for a file
bool defaultName(PathName& path,
const PathName& name) const;
bool defaultName(PathName& path, const PathName& name) const;
};
class TempDirectoryList : public DirectoryList {
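
The comments above spell out the lookup contract; a sketch of a caller (openDatabase() and the way the list instance is obtained are assumptions; only the expandFileName() signature comes from the declaration above):

void openFromList(const Firebird::DirectoryList& dirs, const Firebird::PathName& name)
{
	Firebird::PathName full;
	dirs.expandFileName(full, name);   // full = directory + name if found, otherwise just name
	openDatabase(full);                // openDatabase() is a made-up consumer
}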

View File

@ -17,7 +17,8 @@ const size_t ENGINE_FAILURE_SPACE = 4096;
typedef Firebird::CircularStringsBuffer<ENGINE_FAILURE_SPACE> CircularBuffer;
class InterlockedStringsBuffer : public CircularBuffer {
class InterlockedStringsBuffer : public CircularBuffer
{
public:
explicit InterlockedStringsBuffer(Firebird::MemoryPool&)
: CircularBuffer() { }

View File

@ -215,14 +215,17 @@ public:
ISC_STATUS *dba_status;
ISC_STATUS_ARRAY dba_status_vector;
static inline tdba* getSpecific() {
static inline tdba* getSpecific()
{
return (tdba*) ThreadData::getSpecific();
}
static inline void putSpecific(tdba* &tddba, tdba* thd_context) {
static inline void putSpecific(tdba* &tddba, tdba* thd_context)
{
tddba = thd_context;
tddba->ThreadData::putSpecific();
}
static inline void restoreSpecific() {
static inline void restoreSpecific()
{
ThreadData::restoreSpecific();
}
};

View File

@ -105,16 +105,17 @@ void missing_parameter_for_switch(const char* sw) {
class b_error : public Firebird::LongJump
{
public:
explicit b_error(const char* message) {
explicit b_error(const char* message)
{
size_t len = sizeof(txt) - 1;
strncpy(txt, message, len);
txt[len] = 0;
}
enum {MSG_LEN = 1024};
virtual ~b_error() throw() {}
virtual const char* what() const throw()
{ return txt; }
static void raise(const char* message, ...) {
virtual const char* what() const throw() { return txt; }
static void raise(const char* message, ...)
{
char temp[MSG_LEN];
va_list params;
va_start(params, message);
@ -144,7 +145,8 @@ const char local_prefix[] = "localhost:";
const char backup_signature[4] = {'N','B','A','K'};
struct inc_header {
struct inc_header
{
char signature[4]; // 'NBAK'
SSHORT version; // Incremental backup format version.
SSHORT level; // Backup level.
@ -157,7 +159,8 @@ struct inc_header {
ULONG prev_scn; // SCN of previous level backup
};
class nbackup {
class nbackup
{
public:
nbackup(const char* _database, const char* _username, const char* _password, bool _run_db_triggers)
{
@ -557,7 +560,8 @@ void nbackup::backup_database(int level, const char* fname)
attach_database();
try {
// Look for SCN and GUID of previous-level backup in history table
if (level) {
if (level)
{
if (isc_start_transaction(status, &trans, 1, &newdb, 0, NULL))
pr_error(status, "start transaction");
char out_sqlda_data[XSQLDA_LENGTH(2)];