8 stars · 0 forks — mirror of https://github.com/FirebirdSQL/firebird.git, synced 2025-01-23 14:03:07 +01:00

Fixed bug CORE-3409 : Segmentation Fault after upgrade 2.1.3 -> 2.1.4

This commit is contained in:
hvlad 2011-07-17 09:59:42 +00:00
parent da874ec655
commit a406f32ca5
3 changed files with 175 additions and 81 deletions

View File

@ -55,7 +55,9 @@ class CachedLock : public GlobalRWLock
public:
CachedLock(thread_db* tdbb, Firebird::MemoryPool& p, locktype_t lckType,
size_t lockLen, const UCHAR* lockStr) :
GlobalRWLock(tdbb, p, lckType, lockLen, lockStr)
GlobalRWLock(tdbb, p, lckType, lockLen, lockStr),
m_use_count(0),
m_chgKey(NULL)
{
QUE_INIT(m_lru);
}
@ -69,13 +71,33 @@ public:
// Re-keys this cached lock. The pending key is first published via m_chgKey
// so concurrent cache lookups can detect that a key change is in progress,
// then the underlying lock is released; only on success is the new key
// copied into the lock block. m_chgKey is always cleared before returning.
// Returns false if the underlying lock could not be released.
// NOTE(review): caller is expected to hold the cache synchronization that
// makes publishing m_chgKey safe — confirm against LocksCache::get().
bool setLockKey(thread_db *tdbb, const UCHAR* key)
{
m_chgKey = key;
if (!tryReleaseLock(tdbb))
{
m_chgKey = NULL;
return false;
}
memcpy(&cached_lock->lck_key, key, cached_lock->lck_length);
m_chgKey = NULL;
return true;
}
// Increment the in-use counter so this cached lock is not selected for
// re-keying/eviction while a caller still references it (isUsed() is
// consulted by the LRU scan in LocksCache::get()).
void addRef()
{
m_use_count++;
}
// Decrement the in-use counter; counterpart of addRef(). Must be called
// exactly once per addRef() or the lock stays pinned in the cache.
void release()
{
m_use_count--;
}
// Returns true while at least one caller holds a reference (addRef()
// without a matching release()). The int counter converts implicitly
// to bool here.
bool isUsed()
{
return m_use_count;
}
// Key-extraction functor used by the SortedArray: wraps the lock's current
// key bytes (and their length) in a KeyHolder for comparison.
static const KeyHolder generate(const void*, const CachedLock* lock) {
return KeyHolder(lock->getLockKey(), lock->cached_lock->lck_length);
}
@ -84,7 +106,9 @@ public:
return KeyHolder::greaterThan(i1, i2);
}
int m_use_count;
que m_lru;
const void* m_chgKey;
};
@ -93,17 +117,19 @@ template <class LockClass = CachedLock>
class LocksCache
{
public:
LocksCache(Jrd::thread_db *tdbb, Jrd::lck_t lockType, size_t lockLen, size_t maxCapacity);
LocksCache(thread_db *tdbb, Jrd::lck_t lockType, size_t lockLen, size_t maxCapacity);
~LocksCache();
GlobalRWLock* get(thread_db *tdbb, const UCHAR* key);
CachedLock* get(thread_db *tdbb, const UCHAR* key);
private:
Firebird::MemoryPool &m_pool;
que m_lru;
que m_changing;
lck_t m_lockType;
size_t m_lockLen;
size_t m_capacity;
size_t m_allocated;
Firebird::SortedArray<LockClass*, Firebird::EmptyStorage<LockClass*>,
const KeyHolder, LockClass, LockClass> m_sortedLocks;
@ -117,9 +143,11 @@ LocksCache<LockClass>::LocksCache(thread_db *tdbb, lck_t lockType, size_t lockLe
m_sortedLocks(m_pool, maxCapacity)
{
QUE_INIT(m_lru);
QUE_INIT(m_changing);
m_lockType = lockType;
m_lockLen = lockLen;
m_capacity = maxCapacity;
m_allocated = 0;
}
template <class LockClass>
@ -135,31 +163,60 @@ LocksCache<LockClass>::~LocksCache()
}
template <class LockClass>
GlobalRWLock* LocksCache<LockClass>::get(thread_db *tdbb, const UCHAR* key)
CachedLock* LocksCache<LockClass>::get(thread_db *tdbb, const UCHAR* key)
{
int tries = MIN(m_capacity / 2, 16);
while (true)
{
LockClass* lock = NULL;
size_t pos;
if (m_sortedLocks.find(KeyHolder(key, m_lockLen), pos))
{
lock = m_sortedLocks[pos];
if (lock->m_chgKey)
{
ThreadExit te;
THREAD_YIELD();
continue;
}
QUE_DELETE(lock->m_lru);
QUE_INSERT(m_lru, lock->m_lru);
return lock;
}
else
bool changing = false;
QUE que_inst = m_changing.que_forward;
for (; que_inst != &m_changing; que_inst = que_inst->que_forward)
{
LockClass* chgLock = (LockClass*) ((SCHAR*) que_inst - OFFSET (LockClass*, m_lru));
fb_assert(chgLock->m_chgKey);
if (memcmp(chgLock->m_chgKey, key, m_lockLen) == 0)
{
changing = true;
{
ThreadExit te;
THREAD_YIELD();
}
que_inst = m_changing.que_forward;
continue;
}
}
if (changing)
continue;
if (m_allocated < m_capacity || !tries)
{
if (m_sortedLocks.getCount() < m_capacity) {
lock = FB_NEW (m_pool) LockClass(tdbb, m_pool, m_lockType, m_lockLen, key);
}
else
{
QUE que_inst = m_lru.que_backward;
int tries = MIN(m_capacity / 2, 16);
while (true)
{
if (tries == 0)
{
m_allocated++;
if (!tries) {
m_capacity++;
lock = FB_NEW (m_pool) LockClass(tdbb, m_pool, m_lockType, m_lockLen, key);
break;
}
QUE_INSERT(m_lru, lock->m_lru);
m_sortedLocks.insert(pos, lock);
return lock;
}
// We going to change key of the least recently used lock.
@ -168,56 +225,75 @@ GlobalRWLock* LocksCache<LockClass>::get(thread_db *tdbb, const UCHAR* key)
// our lock from internal structures first and only then try
// to change its key
if (que_inst == &m_lru) {
que_inst = que_inst->que_backward;
if (QUE_EMPTY(m_lru))
{
tries--;
ThreadExit te;
THREAD_YIELD();
continue;
}
que_inst = m_lru.que_backward;
while (tries && que_inst != &m_lru)
{
lock = (LockClass*) ((SCHAR*) que_inst - OFFSET (LockClass*, m_lru));
if (!lock->isUsed())
break;
lock = NULL;
que_inst = que_inst->que_backward;
tries--;
}
if (!lock)
continue;
bool found = (m_sortedLocks.find(KeyHolder(lock->getLockKey(), m_lockLen), pos));
if (!found) {
DebugBreak();
}
fb_assert(found);
que_inst = que_inst->que_backward;
QUE_DELETE(lock->m_lru);
m_sortedLocks.remove(pos);
QUE_INSERT(m_changing, lock->m_lru);
if (lock->setLockKey(tdbb, key))
break;
{
found = (m_sortedLocks.find(KeyHolder(lock->getLockKey(), m_lockLen), pos));
if (found) {
DebugBreak();
}
fb_assert(!found);
// remove from changing que
QUE_DELETE(lock->m_lru);
QUE_INSERT(m_lru, lock->m_lru);
m_sortedLocks.insert(pos, lock);
return lock;
}
tries--;
found = (m_sortedLocks.find(KeyHolder(lock->getLockKey(), m_lockLen), pos));
if (found)
{
DebugBreak();
}
else
{
// remove from changing que
QUE_DELETE(lock->m_lru);
// move busy lock to the head of LRU queue
QUE_INSERT(m_lru, lock->m_lru);
// and put it back to the sorted array
found = (m_sortedLocks.find(KeyHolder(lock->getLockKey(), m_lockLen), pos));
m_sortedLocks.insert(pos, lock);
}
fb_assert(!found);
m_sortedLocks.insert(pos, lock);
}
if (m_sortedLocks.find(KeyHolder(key, m_lockLen), pos))
{
Firebird::HalfStaticArray<UCHAR, 64> zeroBuf;
UCHAR* zeroKey = zeroBuf.getBuffer(m_lockLen);
memset(zeroKey, 0, m_lockLen);
bool ok = lock->setLockKey(tdbb, zeroKey);
fb_assert(ok);
m_sortedLocks.insert(0, lock);
QUE_APPEND(m_lru, lock->m_lru);
lock = m_sortedLocks[pos+1];
QUE_DELETE(lock->m_lru);
QUE_INSERT(m_lru, lock->m_lru);
return lock;
}
}
m_sortedLocks.insert(pos, lock);
}
QUE_INSERT(m_lru, lock->m_lru);
return lock;
}
}; // namespace Jrd

View File

@ -266,6 +266,7 @@ void BtrPageGCLock::disablePageGC(thread_db* tdbb, const PageNumber &page)
BtrPageLocks* locks = getLocksCache(tdbb);
m_lock = locks->get(tdbb, key);
m_lock->addRef();
m_lock->lock(tdbb, LCK_read, LCK_WAIT);
}
@ -273,6 +274,7 @@ void BtrPageGCLock::enablePageGC(thread_db* tdbb)
{
fb_assert(m_lock);
m_lock->unlock(tdbb, LCK_read);
m_lock->release();
m_lock = NULL;
}
@ -282,7 +284,8 @@ bool BtrPageGCLock::isPageGCAllowed(thread_db* tdbb, const PageNumber& page)
page.getLockStr(key);
BtrPageLocks* locks = getLocksCache(tdbb);
GlobalRWLock *lock = locks->get(tdbb, key);
CachedLock *lock = locks->get(tdbb, key);
lock->addRef();
ISC_STATUS_ARRAY temp_status;
ISC_STATUS* const org_status = tdbb->tdbb_status_vector;
@ -293,6 +296,7 @@ bool BtrPageGCLock::isPageGCAllowed(thread_db* tdbb, const PageNumber& page)
if (res) {
lock->unlock(tdbb, LCK_write);
}
lock->release();
tdbb->tdbb_status_vector = org_status;
@ -1895,6 +1899,14 @@ void BTR_remove(thread_db* tdbb, WIN * root_window, index_insertion* insertion)
index_root_page* root =
(index_root_page*) CCH_FETCH(tdbb, root_window, LCK_write, pag_root);
if (root->irt_rpt[idx->idx_id].irt_root != window.win_page.getPageNum())
{
gds__log("BTR_remove: root page changed");
CCH_RELEASE(tdbb, root_window);
return;
}
page = (btree_page*) CCH_FETCH(tdbb, &window, LCK_write, pag_index);
// get the page number of the child, and check to make sure
@ -1906,7 +1918,13 @@ void BTR_remove(thread_db* tdbb, WIN * root_window, index_insertion* insertion)
const SLONG number = pageNode.pageNumber;
pointer = BTreeNode::readNode(&pageNode, pointer, flags, false);
if (!(pageNode.isEndBucket || pageNode.isEndLevel)) {
if (!(pageNode.isEndBucket || pageNode.isEndLevel) ||
!BtrPageGCLock::isPageGCAllowed(tdbb, window.win_page))
{
if (pageNode.isEndBucket || pageNode.isEndLevel) {
gds__log("BTR_remove: root page gc not allowed");
}
CCH_RELEASE(tdbb, &window);
CCH_RELEASE(tdbb, root_window);
return;

View File

@ -239,13 +239,13 @@ public:
~BtrPageGCLock();
static BtrPageLocks* getLocksCache(thread_db* tdbb);
void disablePageGC(thread_db* tdbb, const PageNumber &page);
void enablePageGC(thread_db* tdbb);
static bool isPageGCAllowed(thread_db* tdbb, const PageNumber &page);
private:
GlobalRWLock *m_lock;
static BtrPageLocks* getLocksCache(thread_db* tdbb);
CachedLock *m_lock;
};
} //namespace Jrd