Mirror of https://github.com/FirebirdSQL/firebird.git

Fixed compiling & running firebird under Valgrind control
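In short: the change comments out VALGRIND_FIX_IT (its only visible user, a VALGRIND_MAKE_MEM_DEFINED branch in releaseMemory(), is deleted below), renames the internal allocators MemPool::alloc and MemPool::allocate2 to the more descriptive allocateInternal and allocateRange, and drops the old handle-plus-VALGRIND_DISCARD bookkeeping in favour of plain address-range requests. The following sketch, illustrative and not Firebird code, shows the three memcheck range requests the new code standardizes on; it needs the Valgrind development headers, and every request degrades to a no-op when the binary runs outside Valgrind.

#include <cstddef>
#include <cstdlib>
#include <valgrind/memcheck.h>

int main()
{
	const std::size_t size = 64;
	char* block = static_cast<char*>(std::malloc(size));

	// On current Valgrind these are fire-and-forget range requests; no
	// handle comes back that would later need VALGRIND_DISCARD.
	VALGRIND_MAKE_MEM_NOACCESS(block, size);   // any read or write is reported
	VALGRIND_MAKE_MEM_UNDEFINED(block, size);  // addressable, contents unknown
	block[0] = 1;                              // fine: writing defines the byte
	VALGRIND_MAKE_MEM_DEFINED(block, size);    // addressable and defined

	std::free(block);
	return 0;
}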

AlexPeshkoff 2023-09-08 18:04:07 +03:00
parent ac59a130bc
commit 569e05f8d8


@@ -70,7 +70,7 @@
#ifdef USE_VALGRIND
#include <valgrind/memcheck.h>
-#define VALGRIND_FIX_IT // overrides suspicious valgrind behavior
+//#define VALGRIND_FIX_IT // overrides suspicious valgrind behavior
#endif // USE_VALGRIND
namespace {
@@ -119,7 +119,6 @@ struct DelayedExtent
{
void* memory; // Extent pointer
size_t size; // Size of extent
-int handle; // Valgrind handle of protected extent block
};
DelayedExtent delayedExtents[DELAYED_EXTENT_COUNT];
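The deleted handle member existed only so releaseRaw() could later VALGRIND_DISCARD the block description that VALGRIND_MAKE_MEM_NOACCESS used to return; with the discard calls gone (see the releaseRaw hunks below), a parked extent is just a pointer/size pair. A minimal sketch of the delayed-release ring this array implements, with the count value assumed rather than taken from Firebird:

#include <cstddef>
#include <valgrind/memcheck.h>

constexpr std::size_t DELAYED_EXTENT_COUNT = 32;  // assumed, not Firebird's value

struct DelayedExtent
{
	void* memory;        // extent pointer
	std::size_t size;    // size of extent
};

DelayedExtent delayedExtents[DELAYED_EXTENT_COUNT];
std::size_t delayedExtentCount = 0;  // entries used while the ring fills
std::size_t delayedExtentsPos = 0;   // oldest entry once it is full

// Park an extent; haveExpired reports whether one left the ring and must
// really be munmap()'ed by the caller.
DelayedExtent delayRelease(void* memory, std::size_t size, bool& haveExpired)
{
	VALGRIND_MAKE_MEM_NOACCESS(memory, size);  // stale pointers now trip memcheck
	if (delayedExtentCount < DELAYED_EXTENT_COUNT)
	{
		delayedExtents[delayedExtentCount++] = { memory, size };
		haveExpired = false;
		return {};
	}
	DelayedExtent expired = delayedExtents[delayedExtentsPos];
	delayedExtents[delayedExtentsPos] = { memory, size };
	if (++delayedExtentsPos >= DELAYED_EXTENT_COUNT)
		delayedExtentsPos = 0;
	haveExpired = true;
	return expired;
}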
@@ -1814,12 +1813,12 @@ private:
};
#endif // VALIDATE_POOL
-MemBlock* alloc(size_t from, size_t& length, bool flagRedirect);
+MemBlock* allocateInternal(size_t from, size_t& length, bool flagRedirect);
void releaseBlock(MemBlock *block, bool flagDecr) noexcept;
public:
void* allocate(size_t size ALLOC_PARAMS);
-MemBlock* allocate2(size_t from, size_t& size ALLOC_PARAMS);
+MemBlock* allocateRange(size_t from, size_t& size ALLOC_PARAMS);
private:
virtual void memoryIsExhausted(void);
@@ -2268,9 +2267,9 @@ void MemoryPool::setStatsGroup(MemoryStats& newStats) noexcept
pool->setStatsGroup(newStats);
}
-MemBlock* MemPool::alloc(size_t from, size_t& length, bool flagRedirect)
+MemBlock* MemPool::allocateInternal(size_t from, size_t& length, bool flagRedirect)
{
-MutexEnsureUnlock guard(mutex, "MemPool::alloc");
+MutexEnsureUnlock guard(mutex, "MemPool::allocateInternal");
guard.enter();
++blocksAllocated;
@@ -2282,10 +2281,12 @@ MemBlock* MemPool::alloc(size_t from, size_t& length, bool flagRedirect)
if (block)
return block;
// Parent redirection of medium blocks
if (parent_redirect && flagRedirect && length < PARENT_REDIRECT_THRESHOLD)
{
guard.leave();
-block = parent->alloc(from, length, false);
+block = parent->allocateInternal(from, length, false);
guard.enter();
if (block)
@@ -2326,22 +2327,21 @@ MemBlock* MemPool::alloc(size_t from, size_t& length, bool flagRedirect)
return hunk->block;
}
-MemBlock* MemPool::allocate2(size_t from, size_t& size
+MemBlock* MemPool::allocateRange(size_t from, size_t& size
#ifdef DEBUG_GDS_ALLOC
, const char* fileName, int line
#endif
)
{
size_t length = from ? size : ROUNDUP(size + VALGRIND_REDZONE, roundingSize) + GUARD_BYTES;
-MemBlock* memory = alloc(from, length, true);
+MemBlock* memory = allocateInternal(from, length, true);
size = length - (VALGRIND_REDZONE + GUARD_BYTES);
+memory->pool = this;
#ifdef USE_VALGRIND
VALGRIND_MEMPOOL_ALLOC(this, &memory->body, size);
#endif
-memory->pool = this;
#ifdef DEBUG_GDS_ALLOC
memory->fileName = fileName;
memory->lineNumber = line;
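For context, VALGRIND_MEMPOOL_ALLOC and VALGRIND_MEMPOOL_FREE are Valgrind's custom-allocator client requests: once a pool object is registered with VALGRIND_CREATE_MEMPOOL, each user-visible body is tracked much like a heap block. A toy sketch of the idea follows; all names are invented, and the real pool carves its arenas out of extents rather than a static buffer.

#include <cstddef>
#include <valgrind/memcheck.h>

alignas(16) static char arena[4096];   // stand-in for a real extent

class ToyPool                          // hypothetical, not Firebird's MemPool
{
	std::size_t used = 0;

public:
	ToyPool()
	{
		VALGRIND_CREATE_MEMPOOL(this, /*rzB=*/0, /*is_zeroed=*/0);
		VALGRIND_MAKE_MEM_NOACCESS(arena, sizeof arena);  // nothing handed out yet
	}

	~ToyPool() { VALGRIND_DESTROY_MEMPOOL(this); }

	void* allocate(std::size_t size)
	{
		void* body = arena + used;
		used += (size + 15) & ~std::size_t(15);
		VALGRIND_MEMPOOL_ALLOC(this, body, size);  // body now tracked, undefined
		return body;
	}

	void release(void* body)
	{
		VALGRIND_MEMPOOL_FREE(this, body);  // any later touch is use-after-free
	}
};

int main()
{
	ToyPool pool;
	char* p = static_cast<char*>(pool.allocate(16));
	p[0] = 'x';       // fine
	pool.release(p);
	// Reading p[0] here would now be reported by memcheck.
	return 0;
}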
@@ -2364,7 +2364,7 @@ void* MemPool::allocate(size_t size ALLOC_PARAMS)
Validator vld(this);
#endif
-MemBlock* memory = allocate2(0, size ALLOC_PASS_ARGS);
+MemBlock* memory = allocateRange(0, size ALLOC_PASS_ARGS);
increment_usage(memory->getSize());
@@ -2393,7 +2393,7 @@ void MemPool::releaseMemory(void* object, bool flagExtent) noexcept
VALGRIND_MEMPOOL_FREE(pool, object);
// block is placed in delayed buffer - mark as NOACCESS for that time
-VALGRIND_DISCARD(VALGRIND_MAKE_MEM_NOACCESS(block, offsetof(MemBlock, body)));
+VALGRIND_MAKE_MEM_NOACCESS(block, block->getSize());
// Extend circular buffer if possible
if (pool->delayedFreeCount < FB_NELEM(pool->delayedFree))
@@ -2409,18 +2409,6 @@ void MemPool::releaseMemory(void* object, bool flagExtent) noexcept
block = pool->delayedFree[pool->delayedFreePos];
object = &block->body;
-// Re-enable access to MemBlock
-VALGRIND_DISCARD(VALGRIND_MAKE_MEM_DEFINED(block, offsetof(MemBlock, body)));
-// Remove protection from memory block
-#ifdef VALGRIND_FIX_IT
-VALGRIND_DISCARD(
-VALGRIND_MAKE_MEM_DEFINED(object, block->getSize() - VALGRIND_REDZONE));
-#else
-VALGRIND_DISCARD(
-VALGRIND_MAKE_MEM_UNDEFINED(object, block->getSize() - VALGRIND_REDZONE));
-#endif
// Replace element in circular buffer
pool->delayedFree[pool->delayedFreePos] = requested_block;
@@ -2429,9 +2417,14 @@ void MemPool::releaseMemory(void* object, bool flagExtent) noexcept
pool->delayedFreePos = 0;
#endif
+// Re-enable access to MemBlock
+block->valgrindInternal();
#ifdef DEBUG_GDS_ALLOC
block->fileName = NULL;
#endif
+// Finally delete it
pool->releaseBlock(block, !flagExtent);
}
}
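Two related simplifications here: a released block is now marked NOACCESS across its whole getSize() instead of header-only plus a separate redzone dance, and when a block finally leaves the delayed-free ring a single block->valgrindInternal() call re-opens it. The body of valgrindInternal() is outside this diff; judging from the header-only MAKE_MEM_DEFINED it replaces, it plausibly looks like this sketch (layout guessed):

#include <cstddef>
#include <valgrind/memcheck.h>

struct MemBlockSketch              // stand-in; the real MemBlock differs
{
	std::size_t length;
	char body[1];                  // user data starts here

	std::size_t getSize() const { return length; }

	// Hypothetical mirror of the real valgrindInternal(): re-open just the
	// header so the pool can read getSize() and the free-list links while
	// the user-visible body stays NOACCESS.
	void valgrindInternal()
	{
		VALGRIND_MAKE_MEM_DEFINED(this, offsetof(MemBlockSketch, body));
	}
};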
@@ -2574,7 +2567,7 @@ void* MemPool::allocRaw(size_t size)
#ifdef USE_VALGRIND
// Let Valgrind forget that block was zero-initialized
-VALGRIND_DISCARD(VALGRIND_MAKE_MEM_UNDEFINED(result, size));
+VALGRIND_MAKE_MEM_UNDEFINED(result, size);
#endif
increment_mapping(size);
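Same trimming here, intent unchanged: freshly mapped anonymous pages are zero-filled, so memcheck treats them as defined; downgrading them to undefined keeps "use of uninitialised value" reports working for allocations carved from the extent. A self-contained sketch of this spot (the real allocRaw() also has Windows and extent-cache paths):

#include <cstddef>
#include <sys/mman.h>
#include <valgrind/memcheck.h>

void* allocRawSketch(std::size_t size)
{
	void* result = mmap(nullptr, size, PROT_READ | PROT_WRITE,
	                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (result == MAP_FAILED)
		return nullptr;

	// Let Valgrind forget that the kernel zero-initialized these pages.
	VALGRIND_MAKE_MEM_UNDEFINED(result, size);
	return result;
}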
@@ -2587,7 +2580,7 @@ void* MemPool::getExtent(size_t from, size_t& to) // pass desired minimum size,
#ifdef VALIDATE_POOL
MutexLockGuard guard(mutex, "MemPool::getExtent");
#endif
-MemBlock* extent = allocate2(from, to ALLOC_ARGS);
+MemBlock* extent = allocateRange(from, to ALLOC_ARGS);
extent->setExtent();
return &extent->body;
}
@@ -2618,9 +2611,13 @@ void MemPool::releaseRaw(bool destroying, void* block, size_t size, ExtentsCache
return;
}
}
+#define unmapBlockPtr block
+#define unmapBlockSize size
#else
// Set access protection for block to prevent memory from deleted pool being accessed
-int handle = /* //VALGRIND_MAKE_MEM_NOACCESS */ VALGRIND_MAKE_MEM_DEFINED(block, size);
+VALGRIND_MAKE_MEM_NOACCESS(block, size);
size = FB_ALIGN(size, get_map_page_size());
@@ -2640,23 +2637,18 @@ void MemPool::releaseRaw(bool destroying, void* block, size_t size, ExtentsCache
DelayedExtent* item = &delayedExtents[delayedExtentCount];
item->memory = block;
item->size = size;
-item->handle = handle;
delayedExtentCount++;
return;
}
DelayedExtent* item = &delayedExtents[delayedExtentsPos];
-// Free message associated with old extent in Valgrind
-VALGRIND_DISCARD(item->handle);
// Set up the block we are going to unmap
unmapBlockPtr = item->memory;
unmapBlockSize = item->size;
// Replace element in circular buffer
item->memory = block;
-item->handle = handle;
item->size = size;
// Move queue pointer to next element and cycle if needed
@@ -2664,11 +2656,6 @@ void MemPool::releaseRaw(bool destroying, void* block, size_t size, ExtentsCache
if (delayedExtentsPos >= FB_NELEM(delayedExtents))
delayedExtentsPos = 0;
}
-else
-{
-// Let Valgrind forget about unmapped block
-VALGRIND_DISCARD(handle);
-}
#endif
size = FB_ALIGN(size, get_map_page_size());
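The unmapBlockPtr / unmapBlockSize names added above let one munmap() call site serve both builds: without USE_VALGRIND they are macros aliasing the incoming parameters, with it they are real variables (declared outside the hunks shown) that the ring points at the oldest parked extent. A compilable sketch of the pattern:

#include <cstddef>
#include <sys/mman.h>

#ifndef USE_VALGRIND
// Non-Valgrind build: the names simply alias the parameters.
#define unmapBlockPtr block
#define unmapBlockSize size
#endif

void releaseRawSketch(void* block, std::size_t size)
{
#ifdef USE_VALGRIND
	// Valgrind build: real variables set from the oldest parked extent
	// (ring lookup elided here).
	void* unmapBlockPtr = block;          // stand-in for item->memory
	std::size_t unmapBlockSize = size;    // stand-in for item->size
#endif
	munmap(unmapBlockPtr, unmapBlockSize);
}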
@@ -2678,16 +2665,19 @@ void MemPool::releaseRaw(bool destroying, void* block, size_t size, ExtentsCache
#else // WIN_NT
#if (defined SOLARIS) && (defined HAVE_CADDR_T)
-int rc = munmap((caddr_t) block, size);
+int rc = munmap((caddr_t) unmapBlockPtr, unmapBlockSize);
#else
-int rc = munmap(block, size);
+int rc = munmap(unmapBlockPtr, unmapBlockSize);
#endif
if (rc)
{
if (errno == ENOMEM)
{
-FailedBlock* failed = (FailedBlock*) block;
-failed->blockSize = size;
+FailedBlock* failed = (FailedBlock*) unmapBlockPtr;
+#ifdef USE_VALGRIND
+VALGRIND_MAKE_MEM_UNDEFINED(failed, sizeof(FailedBlock));
+#endif
+failed->blockSize = unmapBlockSize;
MutexLockGuard guard(cache_mutex, "MemPool::releaseRaw");
SemiDoubleLink::push(&failedList, failed);
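The added VALGRIND_MAKE_MEM_UNDEFINED is the counterpart of the NOACCESS protection above: an extent that reaches this ENOMEM fallback was already parked as inaccessible, so writing the FailedBlock header into it would itself trip memcheck unless that small range is re-opened first. A sketch of the idea, with the FailedBlock layout guessed (only blockSize appears in this diff; SemiDoubleLink suggests link pointers):

#include <cstddef>
#include <valgrind/memcheck.h>

struct FailedBlockSketch           // guessed layout, not Firebird's FailedBlock
{
	std::size_t blockSize;
	FailedBlockSketch* next;
	FailedBlockSketch** prev;
};

// Keep an extent whose munmap() failed; it is NOACCESS on entry.
void keepFailedSketch(void* block, std::size_t size)
{
	FailedBlockSketch* failed = static_cast<FailedBlockSketch*>(block);
	VALGRIND_MAKE_MEM_UNDEFINED(failed, sizeof(FailedBlockSketch));
	failed->blockSize = size;      // safe now; would be reported otherwise
	// ... then push onto the failed list, as SemiDoubleLink::push() does above
}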