/*
 *	PROGRAM:	JRD Lock Manager
 *	MODULE:		lock.cpp
 *	DESCRIPTION:	Generic ISC Lock Manager
 *
 * The contents of this file are subject to the Interbase Public
 * License Version 1.0 (the "License"); you may not use this file
 * except in compliance with the License. You may obtain a copy
 * of the License at http://www.Inprise.com/IPL.html
 *
 * Software distributed under the License is distributed on an
 * "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either express
 * or implied. See the License for the specific language governing
 * rights and limitations under the License.
 *
 * The Original Code was created by Inprise Corporation
 * and its predecessors. Portions created by Inprise Corporation are
 * Copyright (C) Inprise Corporation.
 *
 * All Rights Reserved.
 * Contributor(s): ______________________________________.
 *
 * 2002.02.15 Sean Leyne - Code Cleanup, removed obsolete "IMP" port
 *
 * 2002.10.27 Sean Leyne - Completed removal of obsolete "DELTA" port
 * 2002.10.27 Sean Leyne - Completed removal of obsolete "IMP" port
 *
 * 2002.10.29 Sean Leyne - Removed obsolete "Netware" port
 * 2003.03.24 Nickolay Samofatov
 *	- cleanup #define's,
 *	- shutdown blocking thread cleanly on Windows CS
 *	- fix Windows CS lock-ups (make wakeup event manual-reset)
 *	- detect deadlocks instantly in most cases (if blocking owner
 *	  dies during AST processing deadlock scan timeout still applies)
 * 2003.04.29 Nickolay Samofatov - fix broken lock table resizing code in CS builds
 * 2003.08.11 Nickolay Samofatov - finally and correctly fix Windows CS lock-ups.
 *	  Roll back earlier workarounds on this subject.
 *
 */
#include "firebird.h"
#include "../common/classes/timestamp.h"
2004-04-29 00:36:29 +02:00
#include <stdio.h>
2001-05-23 15:26:42 +02:00
#include "../jrd/common.h"
2008-01-16 08:40:12 +01:00
#include "../jrd/ThreadStart.h"
2001-05-23 15:26:42 +02:00
#include "../jrd/isc.h"
#include "../lock/lock.h"
#include "../lock/lock_proto.h"
#include "gen/iberror.h"
2001-05-23 15:26:42 +02:00
#include "../jrd/gds_proto.h"
#include "../jrd/gdsassert.h"
#include "../jrd/isc_proto.h"
2008-01-16 08:40:12 +01:00
#include "../jrd/isc_signal.h"
#include "../jrd/os/isc_i_proto.h"
2001-05-23 15:26:42 +02:00
#include "../jrd/isc_s_proto.h"
#include "../jrd/sch_proto.h"
#include "../jrd/thread_proto.h"
2002-12-07 14:27:12 +01:00
#include "../common/config/config.h"
2008-01-16 08:40:12 +01:00
#include "../common/classes/semaphore.h"
#include "../common/classes/init.h"
2008-01-16 08:40:12 +01:00
2001-05-23 15:26:42 +02:00
#include <errno.h>
2001-07-12 07:46:06 +02:00
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
2004-05-13 21:47:30 +02:00
#ifdef HAVE_SYS_WAIT_H
# include <sys/wait.h>
#endif
#if TIME_WITH_SYS_TIME
# include <sys/time.h>
# include <time.h>
#else
# if HAVE_SYS_TIME_H
# include <sys/time.h>
# else
# include <time.h>
# endif
#endif
2001-07-12 07:46:06 +02:00
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_STRING_H
#include <string.h>
#endif
#ifdef HAVE_VFORK_H
2001-05-23 15:26:42 +02:00
#include <vfork.h>
#endif
#ifdef WIN_NT
#include <process.h>
2008-01-16 08:40:12 +01:00
#define MUTEX &lock_manager_mutex
2004-05-17 17:14:10 +02:00
#else
#define MUTEX LOCK_header->lhb_mutex
2001-05-23 15:26:42 +02:00
#endif
#ifdef DEV_BUILD
#define ASSERT_ACQUIRED current_is_active_owner (true, __LINE__)
#define ASSERT_RELEASED current_is_active_owner (false, __LINE__)
#define VALIDATE_LOCK_TABLE
#if ((defined HAVE_MMAP || defined WIN_NT) && !(defined SUPERSERVER))
#define LOCK_DEBUG_ACQUIRE
#define DEBUG_ACQUIRE_INTERVAL 5000
static ULONG debug_acquire_count = 0;
#endif
#define CHECK(x)	{ if (!(x)) bug_assert ("consistency check", __LINE__); }
//#define DEBUG_TRACE
#else // DEV_BUILD
#define ASSERT_ACQUIRED		/* nothing */
#define ASSERT_RELEASED		/* nothing */
#define CHECK(x)			/* nothing */
#endif // DEV_BUILD

#ifdef DEBUG
#define DEBUG_MANAGER "manager"
#define DEBUG_TRACE
#endif

#ifdef DEBUG_TRACE
#define LOCK_TRACE(x)	{ time_t t; time(&t); printf("%s", ctime(&t) ); printf x; fflush (stdout); gds__log x;}
#else
#define LOCK_TRACE(x)	/* nothing */
#endif

#ifdef DEBUG
SSHORT LOCK_debug_level = 0;
#define DEBUG_MSG(l, x)	if ((l) <= LOCK_debug_level) { time_t t; time(&t); printf("%s", ctime(&t) ); printf x; fflush (stdout); gds__log x; }
#else
#define DEBUG_MSG(l, x)	/* nothing */
#endif

/* Debug delay is used to create nice big windows for signals or other
 * events to occur in - eg: slow down the code to try and make
 * timing race conditions show up
 */
#ifdef DEBUG
#define DEBUG_DELAY	debug_delay (__LINE__)
#else
#define DEBUG_DELAY	/* nothing */
#endif
// CVC: Unlike other definitions, SRQ_PTR is not a pointer to something in lowercase.
// It's LONG.
const SRQ_PTR DUMMY_OWNER_CREATE = -1;
const SRQ_PTR DUMMY_OWNER_DELETE = -2;
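/* Illustrative sketch (not part of the original code): lock table objects
   are addressed by SRQ_PTR offsets relative to the mapped region, and the
   SRQ_ABS_PTR / SRQ_REL_PTR macros convert between offsets and pointers:

		own* owner = (own*) SRQ_ABS_PTR(owner_offset);	// offset -> pointer
		SRQ_PTR offset = SRQ_REL_PTR(owner);			// pointer -> offset

   The round trip is only valid while the mapping is unchanged; after the
   shared file is remapped, every cached pointer must be re-derived from
   its offset (hence the "remap" re-initializations throughout this file). */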
static void acquire(SRQ_PTR);
static UCHAR *alloc(SSHORT, ISC_STATUS*);
static lbl* alloc_lock(USHORT, ISC_STATUS*);
static void blocking_action(SRQ_PTR, SRQ_PTR);
#ifdef USE_BLOCKING_THREAD
static THREAD_ENTRY_DECLARE blocking_action_thread(THREAD_ENTRY_PARAM);
#endif
static void bug(ISC_STATUS*, const TEXT*);
#ifdef DEV_BUILD
static void bug_assert(const TEXT*, ULONG);
#endif
static bool convert(SRQ_PTR, UCHAR, SSHORT, lock_ast_t, void*, ISC_STATUS*);
static bool create_owner(ISC_STATUS*, LOCK_OWNER_T, UCHAR, SRQ_PTR*);
#ifdef DEV_BUILD
static void current_is_active_owner(bool, ULONG);
#endif
static void deadlock_clear();
static lrq* deadlock_scan(own*, lrq*);
static lrq* deadlock_walk(lrq*, bool*);
static void dequeue(SRQ_PTR);
#ifdef DEBUG
static void debug_delay(ULONG);
#endif
static void exit_handler(void*);
static lbl* find_lock(SRQ_PTR, USHORT, const UCHAR*, USHORT, USHORT*);
static lrq* get_request(SRQ_PTR);
static void grant(lrq*, lbl*);
static SRQ_PTR grant_or_que(lrq*, lbl*, SSHORT);
static bool init_lock_table(ISC_STATUS*);
static void init_owner_block(own*, UCHAR, LOCK_OWNER_T);
static void lock_initialize(void*, SH_MEM, bool);
static void insert_data_que(lbl*);
static void insert_tail(SRQ, SRQ);
static USHORT lock_state(lbl*);
static void post_blockage(lrq*, lbl*, bool);
static void post_history(USHORT, SRQ_PTR, SRQ_PTR, SRQ_PTR, bool);
static void post_pending(lbl*);
static void post_wakeup(own*);
#ifndef SUPERSERVER
static bool probe_owners(SRQ_PTR);
#endif
static void purge_owner(SRQ_PTR, own*);
static void remove_que(SRQ);
static void release(SRQ_PTR);
static void release_mutex();
static void release_request(lrq*);
#ifdef USE_BLOCKING_THREAD
static void shutdown_blocking_thread(ISC_STATUS*);
#endif
static bool signal_owner(own*, SRQ_PTR);
#ifdef VALIDATE_LOCK_TABLE
static void validate_history(const SRQ_PTR history_header);
static void validate_parent(const lhb*, const SRQ_PTR);
static void validate_lhb(const lhb*);
static void validate_lock(const SRQ_PTR, USHORT, const SRQ_PTR);
static void validate_owner(const SRQ_PTR, USHORT);
static void validate_request(const SRQ_PTR, USHORT, USHORT);
static void validate_shb(const SRQ_PTR);
#endif
static USHORT wait_for_request(lrq*, SSHORT, ISC_STATUS*);

static SSHORT LOCK_bugcheck = 0;
static lhb* volatile LOCK_header = NULL;
static SRQ_PTR LOCK_owner_offset = 0;
static own* LOCK_owner = 0;
static int LOCK_pid = 0;
static SH_MEM_T LOCK_data;

#ifdef USE_BLOCKING_THREAD
static Firebird::GlobalPtr<Firebird::Semaphore> cleanupSemaphore;
static Firebird::GlobalPtr<Firebird::Semaphore> startupSemaphore;
#endif

#ifdef WIN_NT
static MTX_T lock_manager_mutex;
#endif

const SLONG HASH_MIN_SLOTS	= 101;
const SLONG HASH_MAX_SLOTS	= 65521;
const USHORT HISTORY_BLOCKS	= 256;
#ifdef PREVENT_OWNER_STARVATION
const int STARVATION_THRESHHOLD = 500;	// acquires of lock table
const int STALL_TIMEOUT = 60;			// seconds
#endif

#define SRQ_BASE ((UCHAR*) LOCK_header)

static const bool compatibility[LCK_max][LCK_max] =
{
/*						Shared	Prot	Shared	Prot
			none	null	Read	Read	Write	Write	Exclusive */

/* none */	{true,	true,	true,	true,	true,	true,	true},
/* null */	{true,	true,	true,	true,	true,	true,	true},
/* SR */	{true,	true,	true,	true,	true,	true,	false},
/* PR */	{true,	true,	true,	true,	false,	false,	false},
/* SW */	{true,	true,	true,	false,	true,	false,	false},
/* PW */	{true,	true,	true,	false,	false,	false,	false},
/* EX */	{true,	true,	false,	false,	false,	false,	false}
};

//#define COMPATIBLE(st1, st2) compatibility [st1 * LCK_max + st2]

static inline bool lockOrdering()
{
	return (LOCK_header->lhb_flags & LHB_lock_ordering) ? true : false;
}
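/* Example (illustrative only): the matrix is consulted as
   compatibility[requested][granted].  A shared write may be granted while
   a shared read is held, but not while a protected read is held:

		compatibility[LCK_SW][LCK_SR];	// true  - grant immediately
		compatibility[LCK_SW][LCK_PR];	// false - queue the request

   lock_state() reduces a lock block to its strongest granted state, so a
   single table lookup decides whether a request can be granted. */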
bool LOCK_convert(SRQ_PTR request_offset,
				  UCHAR type,
				  SSHORT lck_wait,
				  lock_ast_t ast_routine,
				  void* ast_argument,
				  ISC_STATUS* status_vector)
{
/**************************************
 *
 *	L O C K _ c o n v e r t
 *
 **************************************
 *
 * Functional description
 *	Perform a lock conversion, if possible.
 *
 **************************************/
	LOCK_TRACE(("LOCK_convert (%d, %d)\n", type, lck_wait));

	lrq* request = get_request(request_offset);
	own* owner = (own*) SRQ_ABS_PTR(request->lrq_owner);
	if (!owner->own_count)
		return false;

	acquire(request->lrq_owner);
	owner = NULL;				/* remap */

	++LOCK_header->lhb_converts;
	request = (lrq*) SRQ_ABS_PTR(request_offset);	/* remap */
	lbl* lock = (lbl*) SRQ_ABS_PTR(request->lrq_lock);
	if (lock->lbl_series < LCK_MAX_SERIES)
		++LOCK_header->lhb_operations[lock->lbl_series];
	else
		++LOCK_header->lhb_operations[0];

	return convert(request_offset, type, lck_wait, ast_routine, ast_argument,
				   status_vector);
}
bool LOCK_deq(SRQ_PTR request_offset)
{
/**************************************
 *
 *	L O C K _ d e q
 *
 **************************************
 *
 * Functional description
 *	Release an outstanding lock.
 *
 **************************************/
	LOCK_TRACE(("LOCK_deq (%ld)\n", request_offset));

	lrq* request = get_request(request_offset);
	SRQ_PTR owner_offset = request->lrq_owner;
	own* owner = (own*) SRQ_ABS_PTR(owner_offset);
	if (!owner->own_count)
		return false;

	acquire(owner_offset);
	owner = NULL;				/* remap */

	++LOCK_header->lhb_deqs;
	request = (lrq*) SRQ_ABS_PTR(request_offset);	/* remap */
	lbl* lock = (lbl*) SRQ_ABS_PTR(request->lrq_lock);
	if (lock->lbl_series < LCK_MAX_SERIES)
		++LOCK_header->lhb_operations[lock->lbl_series];
	else
		++LOCK_header->lhb_operations[0];

	dequeue(request_offset);
	release(owner_offset);
	return true;
}
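/* Sketch of the rule implemented by LOCK_downgrade() below: the request
   keeps the strongest state that is still compatible with the most
   demanding pending conversion.  For instance, with a pending LCK_SR the
   holder of LCK_EX steps down EX -> PW and stops there, because
   compatibility[LCK_SR][LCK_PW] is true. */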
UCHAR LOCK_downgrade(SRQ_PTR request_offset,
					 ISC_STATUS* status_vector)
{
/**************************************
 *
 *	L O C K _ d o w n g r a d e
 *
 **************************************
 *
 * Functional description
 *	Downgrade an existing lock returning
 *	its new state.
 *
 **************************************/
	LOCK_TRACE(("LOCK_downgrade (%ld)\n", request_offset));

	lrq* request = get_request(request_offset);
	SRQ_PTR owner_offset = request->lrq_owner;
	own* owner = (own*) SRQ_ABS_PTR(owner_offset);
	if (!owner->own_count)
		return FALSE; // Warning! Can be treated as LCK_none by the caller!

	acquire(owner_offset);
	owner = NULL;				/* remap */

	++LOCK_header->lhb_downgrades;
	request = (lrq*) SRQ_ABS_PTR(request_offset);	/* Re-init after a potential remap */
	lbl* lock = (lbl*) SRQ_ABS_PTR(request->lrq_lock);
	UCHAR pending_state = LCK_none;

	/* Loop through the requests looking for pending conversions
	   and find the highest requested state */

	srq* lock_srq;
	SRQ_LOOP(lock->lbl_requests, lock_srq) {
		const lrq* pending = (lrq*) ((UCHAR *) lock_srq - OFFSET(lrq*, lrq_lbl_requests));
		if (pending->lrq_flags & LRQ_pending && pending != request) {
			pending_state = MAX(pending->lrq_requested, pending_state);
			if (pending_state == LCK_EX)
				break;
		}
	}

	UCHAR state = request->lrq_state;
	while (state > LCK_none && !compatibility[pending_state][state])
		--state;

	if (state == LCK_none || state == LCK_null) {
		dequeue(request_offset);
		release(owner_offset);
		state = LCK_none;
	}
	else {
		convert(request_offset, state, FALSE,
				request->lrq_ast_routine, request->lrq_ast_argument,
				status_vector);
	}

	return state;
}
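/* Hedged usage sketch for LOCK_enq() below (the owner handle comes from
   LOCK_init(); "ast_handler"/"ast_arg" stand for any lock_ast_t routine
   and its argument, and the key value is hypothetical):

		ISC_STATUS_ARRAY status;
		static const UCHAR key[] = "EXAMPLE";
		SRQ_PTR request = LOCK_enq(0,	// no prior request to release
								   0,	// no parent lock
								   0,	// series
								   key, sizeof(key), LCK_SR,
								   ast_handler, ast_arg, 0,
								   1,	// lck_wait: wait if busy
								   status, owner_handle);
		if (!request)
			;	// status holds isc_deadlock, isc_lock_timeout or isc_lock_conflict
*/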
SRQ_PTR LOCK_enq(SRQ_PTR prior_request,
				 SRQ_PTR parent_request,
				 USHORT series,
				 const UCHAR* value,
				 USHORT length,
				 UCHAR type,
				 lock_ast_t ast_routine,
				 void* ast_argument,
				 SLONG data,
				 SSHORT lck_wait,
				 ISC_STATUS* status_vector,
				 SRQ_PTR owner_offset)
{
/**************************************
 *
 *	L O C K _ e n q
 *
 **************************************
 *
 * Functional description
 *	Enqueue on a lock.  If the lock can't be granted immediately,
 *	return an event count on which to wait.  If the lock can't
 *	be granted because of deadlock, return NULL.
 *
 **************************************/
	LOCK_TRACE(("LOCK_enq (%ld)\n", parent_request));

	own* owner = (own*) SRQ_ABS_PTR(owner_offset);
	if (!owner_offset || !owner->own_count)
		return 0;

	acquire(owner_offset);
	owner = NULL;				/* remap */

	ASSERT_ACQUIRED;
	++LOCK_header->lhb_enqs;

#ifdef VALIDATE_LOCK_TABLE
	if ((LOCK_header->lhb_enqs % 50) == 0)
		validate_lhb(LOCK_header);
#endif

	if (prior_request)
		dequeue(prior_request);

	lrq* request = 0;
	SRQ_PTR parent;
	if (parent_request) {
		request = get_request(parent_request);
		parent = request->lrq_lock;
	}
	else
		parent = 0;

	/* Allocate or reuse a lock request block */

	ASSERT_ACQUIRED;
	if (SRQ_EMPTY(LOCK_header->lhb_free_requests)) {
		if (!(request = (lrq*) alloc(sizeof(lrq), status_vector))) {
			release(owner_offset);
			return 0;
		}
	}
	else {
		ASSERT_ACQUIRED;
		request = (lrq*) ((UCHAR *) SRQ_NEXT(LOCK_header->lhb_free_requests) -
						  OFFSET(lrq*, lrq_lbl_requests));
		remove_que(&request->lrq_lbl_requests);
	}

	owner = (own*) SRQ_ABS_PTR(owner_offset);	/* Re-init after a potential remap */
	post_history(his_enq, owner_offset, (SRQ_PTR)0, SRQ_REL_PTR(request), true);

	request->lrq_type = type_lrq;
	request->lrq_flags = 0;
	request->lrq_requested = type;
	request->lrq_state = LCK_none;
	request->lrq_data = 0;
	request->lrq_owner = owner_offset;
	request->lrq_ast_routine = ast_routine;
	request->lrq_ast_argument = ast_argument;
	insert_tail(&owner->own_requests, &request->lrq_own_requests);
	SRQ_INIT(request->lrq_own_blocks);

	/* See if the lock already exists */

	USHORT hash_slot;
	lbl* lock = find_lock(parent, series, value, length, &hash_slot);
	if (lock)
	{
		if (series < LCK_MAX_SERIES) {
			++LOCK_header->lhb_operations[series];
		}
		else {
			++LOCK_header->lhb_operations[0];
		}

		insert_tail(&lock->lbl_requests, &request->lrq_lbl_requests);
		request->lrq_data = data;
		const SRQ_PTR lock_id = grant_or_que(request, lock, lck_wait);
		if (!lock_id) {
			*status_vector++ = isc_arg_gds;
			*status_vector++ = (lck_wait > 0) ? isc_deadlock :
				((lck_wait < 0) ? isc_lock_timeout : isc_lock_conflict);
			*status_vector++ = isc_arg_end;
		}
		ASSERT_RELEASED;
		return lock_id;
	}

	/* Lock doesn't exist.  Allocate lock block and set it up. */

	SRQ_PTR request_offset = SRQ_REL_PTR(request);

	if (!(lock = alloc_lock(length, status_vector))) {
		/* lock table is exhausted */
		/* release request gracefully */
		remove_que(&request->lrq_own_requests);
		request->lrq_type = type_null;
		insert_tail(&LOCK_header->lhb_free_requests,
					&request->lrq_lbl_requests);
		release(owner_offset);
		return 0;
	}

	lock->lbl_state = type;
	lock->lbl_parent = parent;
	fb_assert(series <= MAX_UCHAR);
	lock->lbl_series = (UCHAR)series;

	/* Maintain lock series data queue */

	SRQ_INIT(lock->lbl_lhb_data);
	if (lock->lbl_data = data)
		insert_data_que(lock);

	if (series < LCK_MAX_SERIES)
		++LOCK_header->lhb_operations[series];
	else
		++LOCK_header->lhb_operations[0];

	lock->lbl_flags = 0;
	lock->lbl_pending_lrq_count = 0;

	{ // scope
		SSHORT l = LCK_max;
		USHORT* ps = lock->lbl_counts;
		while (l--)
			*ps++ = 0;
	} // scope

	if (lock->lbl_length = length)
	{
		UCHAR* p = lock->lbl_key;
		do {
			*p++ = *value++;
		} while (--length);
	}

	request = (lrq*) SRQ_ABS_PTR(request_offset);

	SRQ_INIT(lock->lbl_requests);
	ASSERT_ACQUIRED;
	insert_tail(&LOCK_header->lhb_hash[hash_slot], &lock->lbl_lhb_hash);
	insert_tail(&lock->lbl_requests, &request->lrq_lbl_requests);
	request->lrq_lock = SRQ_REL_PTR(lock);
	grant(request, lock);
	const SRQ_PTR lock_id = SRQ_REL_PTR(request);
	release(request->lrq_owner);

	return lock_id;
}
bool LOCK_set_owner_handle(SRQ_PTR request_offset,
						   SRQ_PTR new_owner_offset)
{
/**************************************
 *
 *	L O C K _ s e t _ o w n e r _ h a n d l e
 *
 **************************************
 *
 * Functional description
 *	Set new owner handle for granted request.
 *
 **************************************/
	LOCK_TRACE(("LOCK_set_owner_handle (%ld)\n", request_offset));

	lrq* request = get_request(request_offset);

	// No need to change the owner
	if (request->lrq_owner == new_owner_offset)
		return true;

	acquire(new_owner_offset);

	request = (lrq*) SRQ_ABS_PTR(request_offset);	/* Re-init after a potential remap */
	own *old_owner = (own*) SRQ_ABS_PTR(request->lrq_owner);
	fb_assert(old_owner->own_pending_request != request_offset);
	own *new_owner = (own*) SRQ_ABS_PTR(new_owner_offset);
	fb_assert(new_owner->own_pending_request != request_offset);
	fb_assert(old_owner->own_process_id == new_owner->own_process_id);

	lbl *lck = (lbl*) SRQ_ABS_PTR(request->lrq_lock);

	// Make sure that a change of lock owner is possible
	SRQ lock_srq;
	SRQ_LOOP(lck->lbl_requests, lock_srq) {
		lrq* granted_request = (lrq*) ((UCHAR *) lock_srq - OFFSET(lrq*, lrq_own_requests));
		// An owner may hold only one granted request on a given lock resource
		if (granted_request->lrq_owner == new_owner_offset) {
			LOCK_TRACE(("The owner already has a granted request"));
			release(request->lrq_owner);
			return false;
		}
	}

	remove_que(&request->lrq_own_requests);
	request->lrq_owner = new_owner_offset;
	insert_tail(&new_owner->own_requests, &request->lrq_own_requests);
	release(new_owner_offset);

	return true;
}
void LOCK_fini(ISC_STATUS* status_vector,
			   SRQ_PTR* owner_offset)
{
/**************************************
 *
 *	L O C K _ f i n i
 *
 **************************************
 *
 * Functional description
 *	Release the owner block and any outstanding locks.
 *	The exit handler will unmap the shared memory.
 *
 **************************************/
	LOCK_TRACE(("LOCK_fini(%ld)\n", *owner_offset));

	SRQ_PTR offset = *owner_offset;
	own* owner = (own*) SRQ_ABS_PTR(offset);
	if (!offset || !owner->own_count)
		return;

	if (--owner->own_count > 0 || !LOCK_header)
		return;

#ifdef USE_BLOCKING_THREAD
	shutdown_blocking_thread(status_vector);
#endif

#if !defined SUPERSERVER && (defined HAVE_MMAP || defined WIN_NT)
	if (LOCK_owner) {
		ISC_unmap_object(status_vector, &LOCK_data, (UCHAR**)&LOCK_owner,
						 sizeof(own));
		LOCK_owner_offset = 0;
	}
#endif
#ifndef SUPERSERVER
	LOCK_owner = 0;
#endif

	if (LOCK_header->lhb_active_owner != offset)
	{
		acquire(offset);
		owner = (own*) SRQ_ABS_PTR(offset);	/* Re-init after a potential remap */
	}

	if (LOCK_pid == owner->own_process_id)
		purge_owner(offset, owner);

	release_mutex();
	*owner_offset = (SRQ_PTR) 0;
}
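/* Hedged usage sketch for LOCK_init()/LOCK_fini(): an owner attaches once,
   keeps the returned handle for all later calls, and detaches when done.
   The owner_type code is assumed to come from the lock manager's header:

		ISC_STATUS_ARRAY status;
		SRQ_PTR owner_handle = 0;
		if (LOCK_init(status, (LOCK_OWNER_T) getpid(), owner_type,
					  &owner_handle) != FB_SUCCESS)
			return;	// status vector describes the failure
		...
		LOCK_fini(status, &owner_handle);	// frees the owner block
*/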
int LOCK_init(ISC_STATUS* status_vector,
			  LOCK_OWNER_T owner_id,
			  UCHAR owner_type,
			  SRQ_PTR* owner_handle)
{
/**************************************
 *
 *	L O C K _ i n i t
 *
 **************************************
 *
 * Functional description
 *	Initialize lock manager for the given owner, if not already done.
 *
 *	Initialize an owner block in the lock manager, if not already
 *	initialized.
 *
 *	Return the offset of the owner block through owner_handle.
 *
 *	Return FB_SUCCESS or FB_FAILURE.
 *
 **************************************/
	LOCK_TRACE(("LOCK_init (ownerid=%ld)\n", owner_id));

	/* If everything is already initialized, just bump the use count. */

	own* owner = 0;
	if (*owner_handle) {
		owner = (own*) SRQ_ABS_PTR(*owner_handle);
		owner->own_count++;
		return FB_SUCCESS;
	}

	if (!LOCK_header) {
		/* We haven't yet mapped the shared region.  Do so now. */
		if (!init_lock_table(status_vector))
			return FB_FAILURE;
	}

	if (!create_owner(status_vector, owner_id, owner_type, owner_handle))
		return FB_FAILURE;

#ifndef SUPERSERVER
	if ( (LOCK_owner_offset = *owner_handle) )
		LOCK_owner = (own*) SRQ_ABS_PTR(*owner_handle);
#endif

	/* Initialize process level stuff for different platforms.
	   This should be done after the call to create_owner() that
	   initializes owner_handle. */

#if !defined SUPERSERVER && (defined HAVE_MMAP || defined WIN_NT)
	/* Map the owner block separately so that threads waiting
	   on synchronization variables embedded in the owner block
	   don't have to coordinate during lock table unmapping. */
	if (LOCK_owner_offset &&
		!(LOCK_owner = (own*) ISC_map_object(status_vector, &LOCK_data,
											 LOCK_owner_offset,
											 sizeof(own))))
	{
		return FB_FAILURE;
	}
#endif

#ifdef USE_BLOCKING_THREAD
	AST_ALLOC();
	const ULONG status = gds__thread_start(blocking_action_thread,
										   &LOCK_owner_offset, THREAD_high, 0, 0);
	if (status) {
		*status_vector++ = isc_arg_gds;
		*status_vector++ = isc_lockmanerr;
		*status_vector++ = isc_arg_gds;
		*status_vector++ = isc_sys_request;
		*status_vector++ = isc_arg_string;
#ifdef WIN_NT
		*status_vector++ = (ISC_STATUS) "CreateThread";
		*status_vector++ = isc_arg_win32;
#else
		*status_vector++ = (ISC_STATUS) "thr_create";
		*status_vector++ = isc_arg_unix;
#endif
		*status_vector++ = status;
		*status_vector++ = isc_arg_end;
		return FB_FAILURE;
	}
#endif

	return FB_SUCCESS;
}
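/* Example for LOCK_query_data() below (sketch): for locks of one series
   under the same parent carrying lbl_data values {3, 5, 10}:

		LOCK_query_data(parent, series, LCK_MIN);	// -> 3  (queue is sorted)
		LOCK_query_data(parent, series, LCK_MAX);	// -> 10
		LOCK_query_data(parent, series, LCK_CNT);	// -> 3  (number of locks)
		LOCK_query_data(parent, series, LCK_SUM);	// -> 18
		LOCK_query_data(parent, series, LCK_AVG);	// -> 6  (18 / 3)
		LOCK_query_data(parent, series, LCK_ANY);	// -> 1  (stops at first hit)
*/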
SLONG LOCK_query_data(SRQ_PTR parent_request,
					  USHORT series,
					  USHORT aggregate)
{
/**************************************
 *
 *	L O C K _ q u e r y _ d a t a
 *
 **************************************
 *
 * Functional description
 *	Query lock series data with respect to a rooted
 *	lock hierarchy calculating aggregates as we go.
 *
 **************************************/
	lrq* parent;

	/* Get root of lock hierarchy */

	if (parent_request && series < LCK_MAX_SERIES)
		parent = get_request(parent_request);
	else {
		CHECK(false);
		return 0;
	}

	acquire(parent->lrq_owner);
	parent = (lrq*) SRQ_ABS_PTR(parent_request);	/* remap */

	++LOCK_header->lhb_query_data;
	srq* data_header = &LOCK_header->lhb_data[series];
	SLONG data = 0, count = 0;

	/* Simply walk the lock series data queue forward for the minimum
	   and backward for the maximum -- it's maintained in sorted order. */

	SRQ lock_srq;
	switch (aggregate) {
	case LCK_MIN:
	case LCK_CNT:
	case LCK_AVG:
	case LCK_SUM:
	case LCK_ANY:
		for (lock_srq = (SRQ) SRQ_ABS_PTR(data_header->srq_forward);
			 lock_srq != data_header; lock_srq = (SRQ) SRQ_ABS_PTR(lock_srq->srq_forward))
		{
			lbl* lock = (lbl*) ((UCHAR *) lock_srq - OFFSET(lbl*, lbl_lhb_data));
			CHECK(lock->lbl_series == series);
			if (lock->lbl_parent != parent->lrq_lock)
				continue;

			switch (aggregate) {
			case LCK_MIN:
				data = lock->lbl_data;
				break;
			case LCK_ANY:
			case LCK_CNT:
				++count;
				break;
			case LCK_AVG:
				++count;
			case LCK_SUM:
				data += lock->lbl_data;
				break;
			}

			if (aggregate == LCK_MIN || aggregate == LCK_ANY)
				break;
		}

		if (aggregate == LCK_CNT || aggregate == LCK_ANY)
			data = count;
		else if (aggregate == LCK_AVG)
			data = (count) ? data / count : 0;
		break;

	case LCK_MAX:
		for (lock_srq = (SRQ) SRQ_ABS_PTR(data_header->srq_backward);
			 lock_srq != data_header; lock_srq = (SRQ) SRQ_ABS_PTR(lock_srq->srq_backward))
		{
			lbl* lock = (lbl*) ((UCHAR *) lock_srq - OFFSET(lbl*, lbl_lhb_data));
			CHECK(lock->lbl_series == series);
			if (lock->lbl_parent != parent->lrq_lock)
				continue;

			data = lock->lbl_data;
			break;
		}
		break;

	default:
		CHECK(false);
	}

	release(parent->lrq_owner);
	return data;
}
SLONG LOCK_read_data(SRQ_PTR request_offset)
{
/**************************************
 *
 *	L O C K _ r e a d _ d a t a
 *
 **************************************
 *
 * Functional description
 *	Read data associated with a lock.
 *
 **************************************/
	LOCK_TRACE(("LOCK_read_data(%ld)\n", request_offset));

	lrq* request = get_request(request_offset);
	acquire(request->lrq_owner);
	++LOCK_header->lhb_read_data;
	request = (lrq*) SRQ_ABS_PTR(request_offset);	/* Re-init after a potential remap */
	lbl* lock = (lbl*) SRQ_ABS_PTR(request->lrq_lock);
	const SLONG data = lock->lbl_data;
	if (lock->lbl_series < LCK_MAX_SERIES)
		++LOCK_header->lhb_operations[lock->lbl_series];
	else
		++LOCK_header->lhb_operations[0];

	release(request->lrq_owner);
	return data;
}
SLONG LOCK_read_data2(SRQ_PTR parent_request,
					  USHORT series,
					  const UCHAR* value,
					  USHORT length,
					  SRQ_PTR owner_offset)
{
/**************************************
 *
 *	L O C K _ r e a d _ d a t a 2
 *
 **************************************
 *
 * Functional description
 *	Read data associated with transient locks.
 *
 **************************************/
	LOCK_TRACE(("LOCK_read_data2(%ld)\n", parent_request));

	acquire(owner_offset);
	++LOCK_header->lhb_read_data;
	if (series < LCK_MAX_SERIES)
		++LOCK_header->lhb_operations[series];
	else
		++LOCK_header->lhb_operations[0];

	SRQ_PTR parent;
	lrq* request;
	if (parent_request) {
		request = get_request(parent_request);
		parent = request->lrq_lock;
	}
	else
		parent = 0;

	SLONG data;
	USHORT junk;
	lbl* lock = find_lock(parent, series, value, length, &junk);
	if (lock)
		data = lock->lbl_data;
	else
		data = 0;

	release(owner_offset);
	return data;
}
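/* Hedged sketch for LOCK_re_post() below: the routine/argument pair has the
   lock_ast_t shape (assumed here to be an int-returning function taking a
   void*) and is eventually run by blocking_action():

		static int sample_ast(void* arg)	// hypothetical AST routine
		{
			// e.g. flag the resource so the working thread releases the lock
			return 0;
		}

		LOCK_re_post(sample_ast, &my_resource, owner_offset);	// deliver later
*/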
void LOCK_re_post(lock_ast_t ast,
				  void* arg,
				  SRQ_PTR owner_offset)
{
/**************************************
 *
 *	L O C K _ r e _ p o s t
 *
 **************************************
 *
 * Functional description
 *	Re-post an AST that was previously blocked.
 *	It is assumed that the routines that look
 *	at the re-post list only test the ast element.
 *
 **************************************/
	lrq* request;

	LOCK_TRACE(("LOCK_re_post(%ld)\n", owner_offset));

	acquire(owner_offset);

	/* Allocate or reuse a lock request block */

	ASSERT_ACQUIRED;
	if (SRQ_EMPTY(LOCK_header->lhb_free_requests)) {
		if (!(request = (lrq*) alloc(sizeof(lrq), NULL))) {
			release(owner_offset);
			return;
		}
	}
	else {
		ASSERT_ACQUIRED;
		request = (lrq*) ((UCHAR *) SRQ_NEXT(LOCK_header->lhb_free_requests) -
						  OFFSET(lrq*, lrq_lbl_requests));
		remove_que(&request->lrq_lbl_requests);
	}

	own* owner = (own*) SRQ_ABS_PTR(owner_offset);
	request->lrq_type = type_lrq;
	request->lrq_flags = LRQ_repost;
	request->lrq_ast_routine = ast;
	request->lrq_ast_argument = arg;
	request->lrq_requested = LCK_none;
	request->lrq_state = LCK_none;
	request->lrq_owner = owner_offset;
	request->lrq_lock = (SRQ_PTR) 0;
	insert_tail(&owner->own_blocks, &request->lrq_own_blocks);

	DEBUG_DELAY;

#ifdef USE_BLOCKING_THREAD
	signal_owner((own*) SRQ_ABS_PTR(owner_offset), (SRQ_PTR) NULL);
#else
	/* The deadlock detection looks at the OWN_signaled bit to decide
	 * whether processes have things to look at - as we're putting
	 * a repost item on the blocking queue, we DO have additional work
	 * to do, so set the flag to indicate so.
	 */
	owner->own_flags &= ~OWN_signal;
	owner->own_ast_flags |= OWN_signaled;
	DEBUG_DELAY;
	blocking_action(owner_offset, (SRQ_PTR) NULL);
	DEBUG_DELAY;
#endif

	release(owner_offset);
}
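/* Example for the lock data word (sketch): a value stored through one
   request is visible to any owner reading the same lock block:

		LOCK_write_data(request, 42);		// stores 42 in lbl_data, returns 42
		SLONG n = LOCK_read_data(request);	// -> 42
*/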
SLONG LOCK_write_data(SRQ_PTR request_offset,
					  SLONG data)
{
/**************************************
 *
 *	L O C K _ w r i t e _ d a t a
 *
 **************************************
 *
 * Functional description
 *	Write a longword into the lock block.
 *
 **************************************/
	LOCK_TRACE(("LOCK_write_data (%ld)\n", request_offset));

	lrq* request = get_request(request_offset);
	acquire(request->lrq_owner);
	++LOCK_header->lhb_write_data;
	request = (lrq*) SRQ_ABS_PTR(request_offset);	/* Re-init after a potential remap */
	lbl* lock = (lbl*) SRQ_ABS_PTR(request->lrq_lock);
	remove_que(&lock->lbl_lhb_data);
	if (lock->lbl_data = data)
		insert_data_que(lock);

	if (lock->lbl_series < LCK_MAX_SERIES)
		++LOCK_header->lhb_operations[lock->lbl_series];
	else
		++LOCK_header->lhb_operations[0];

	release(request->lrq_owner);
	return data;
}
static void acquire(SRQ_PTR owner_offset)
{
/**************************************
 *
 *	a c q u i r e
 *
 **************************************
 *
 * Functional description
 *	Acquire the lock file.  If it's busy, wait for it.
 *
 **************************************/
#ifdef PREVENT_OWNER_STARVATION
  acquire_retry:
#endif

	/* Check that we aren't trying to acquire when we already own it! */
	/* ASSERT_RELEASED; This will not work, when the current active owner
	   of the lock table falls in the remapped portion of the map
	   file, which we are yet to expand (remap) to */

	/* Measure the impact of the lock table resource as an overall
	   system bottleneck.  This will be a useful metric for lock
	   improvements and as a limiting factor for SMP.  A conditional
	   mutex would probably be more accurate but isn't worth the
	   effort. */

	SRQ_PTR prior_active = LOCK_header->lhb_active_owner;

#ifdef PREVENT_OWNER_STARVATION
	if (LOCK_owner) {
		/* Record a "timestamp" of when this owner requested the lock table */
		LOCK_owner->own_acquire_time = LOCK_header->lhb_acquires;
		LOCK_owner->own_ast_hung_flags |= OWN_hung;
	}
#endif

	/* Perform a spin wait on the lock table mutex.  This should only
	   be used on SMP machines; it doesn't make much sense otherwise. */

	const SLONG acquire_spins = Config::getLockAcquireSpins();
	SLONG status = FB_FAILURE;
	SLONG spins = 0;
	while (spins++ < acquire_spins) {
		if ((status = ISC_mutex_lock_cond(MUTEX)) == FB_SUCCESS) {
			break;
		}
	}

	/* If the spin wait didn't succeed then wait forever. */

	if (status != FB_SUCCESS) {
		if (ISC_mutex_lock(MUTEX)) {
			bug(NULL, "semop failed (acquire)");
		}
	}

	++LOCK_header->lhb_acquires;
	if (prior_active) {
		++LOCK_header->lhb_acquire_blocks;
	}

	if (spins) {
		++LOCK_header->lhb_acquire_retries;
		if (spins < acquire_spins) {
			++LOCK_header->lhb_retry_success;
		}
	}

	prior_active = LOCK_header->lhb_active_owner;
	LOCK_header->lhb_active_owner = owner_offset;

#ifdef PREVENT_OWNER_STARVATION
	if (LOCK_owner) {
		LOCK_owner->own_ast_hung_flags &= ~OWN_hung;	/* Can't be hung by OS if we got here */
	}
#endif

	if (LOCK_header->lhb_length > LOCK_data.sh_mem_length_mapped
#ifdef LOCK_DEBUG_ACQUIRE
		/* If we're debugging remaps occurring during acquire, force
		   a remap every-so-often. */
		|| ((debug_acquire_count++ % DEBUG_ACQUIRE_INTERVAL) == 0)
#endif
		) {
		SLONG length = LOCK_header->lhb_length;
		/* We do not do Lock table remapping here for SuperServer because
		   we have only one address space and we do not need to adjust our
		   mapping because another process has changed the size of the lock table.
		 */
#if !defined SUPERSERVER && (defined HAVE_MMAP || defined WIN_NT)
		ISC_STATUS_ARRAY status_vector;
		lhb* const header =
			(lhb*) ISC_remap_file(status_vector, &LOCK_data, length, FALSE);
		if (header)
			LOCK_header = header;
		else
#endif
		{
			bug(NULL, "remap failed");
			return;
		}
	}

	/* If we were able to acquire the MUTEX, but there is a prior owner marked
	 * in the lock table, it means that someone died while owning
	 * the lock mutex.  In that event, let's see if there is any unfinished work
	 * left around that we need to finish up.
	 */
	if (prior_active) {
		post_history(his_active, owner_offset, prior_active, (SRQ_PTR) 0, false);
		shb* recover = (shb*) SRQ_ABS_PTR(LOCK_header->lhb_secondary);
		if (recover->shb_remove_node) {
			/* There was a remove_que operation in progress when the prior_owner died */
			DEBUG_MSG(0, ("Got to the funky shb_remove_node code\n"));
			remove_que((SRQ) SRQ_ABS_PTR(recover->shb_remove_node));
		}
		else if (recover->shb_insert_que && recover->shb_insert_prior) {
			/* There was an insert_que operation in progress when the prior_owner died */
			DEBUG_MSG(0, ("Got to the funky shb_insert_que code\n"));
			SRQ lock_srq = (SRQ) SRQ_ABS_PTR(recover->shb_insert_que);
			lock_srq->srq_backward = recover->shb_insert_prior;
			lock_srq = (SRQ) SRQ_ABS_PTR(recover->shb_insert_prior);
			lock_srq->srq_forward = recover->shb_insert_que;
			recover->shb_insert_que = 0;
			recover->shb_insert_prior = 0;
		}
	}

#ifdef PREVENT_OWNER_STARVATION
	if (owner_offset > 0) {
		// Can't be hung by OS if we got here
		own* owner = (own*)SRQ_ABS_PTR(owner_offset);
		owner->own_ast_hung_flags &= ~OWN_hung;

		own* first_owner = (own*) ((UCHAR *) SRQ_NEXT(LOCK_header->lhb_owners) -
								   OFFSET(own*, own_lhb_owners));
		if (first_owner->own_ast_hung_flags & OWN_hung &&
			((LOCK_header->lhb_acquires - first_owner->own_acquire_time)
				> STARVATION_THRESHHOLD))
		{
			first_owner->own_flags |= OWN_starved;
			if (owner->own_flags & OWN_blocking) {
				probe_owners(owner_offset);
				owner->own_flags &= ~OWN_blocking;
				release_mutex();
			}
			else {
				owner->own_flags |= (OWN_blocking | OWN_waiting);
				owner->own_flags &= ~OWN_wakeup;
				event_t* event_ptr = &owner->own_stall;
				const SLONG value = ISC_event_clear(event_ptr);
				release_mutex();
				const SLONG ret = ISC_event_wait(1, &event_ptr, &value,
												 STALL_TIMEOUT * 1000000);
#ifdef DEV_BUILD
				if (ret != FB_SUCCESS)
					gds__log("LOCK: owner %d timed out while stalling for benefit of owner %d",
							 owner_offset, SRQ_REL_PTR(first_owner));
#endif
			}
			goto acquire_retry;
		}
		owner->own_flags &= ~OWN_blocking;
	}
#endif
}
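/* Worked example for the allocator below: sizes are rounded up with
   FB_ALIGN before bumping lhb_used, so with ALIGNMENT == 8 (assumption;
   it is platform dependent) a request for 27 bytes consumes 32 and the
   high-water mark stays aligned.  alloc_lock() applies the same idea to
   key lengths with "(length + 3) & ~3", e.g. a 5-byte key rounds to 8. */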
static UCHAR* alloc(SSHORT size, ISC_STATUS* status_vector)
{
/**************************************
 *
 *	a l l o c
 *
 **************************************
 *
 * Functional description
 *	Allocate a block of given size.
 *
 **************************************/
	size = FB_ALIGN(size, ALIGNMENT);
	ASSERT_ACQUIRED;
	const ULONG block = LOCK_header->lhb_used;
	LOCK_header->lhb_used += size;

	/* Make sure we haven't overflowed the lock table.  If so, bump the size of
	   the table */

	if (LOCK_header->lhb_used > LOCK_header->lhb_length) {
		LOCK_header->lhb_used -= size;
		/* We do not do Lock table remapping for SuperServer mainly because it is not tested
		   and is not really needed as long as SS builds do not use the lock manager for page locks.
		   On all other platforms we grow the lock table automatically.
		 */
#if !defined SUPERSERVER && (defined HAVE_MMAP || defined WIN_NT)
		const ULONG length = LOCK_data.sh_mem_length_mapped + EXTEND_SIZE;
		lhb* header = (lhb*) ISC_remap_file(status_vector, &LOCK_data, length, TRUE);
		if (header) {
			LOCK_header = header;
			ASSERT_ACQUIRED;
			LOCK_header->lhb_length = LOCK_data.sh_mem_length_mapped;
			LOCK_header->lhb_used += size;
		}
		else
#endif
		{
			/* Do not abort if there is not enough room -- just
			   return an error */
			if (status_vector) {
				*status_vector++ = isc_arg_gds;
				*status_vector++ = isc_random;
				*status_vector++ = isc_arg_string;
				*status_vector++ = (ISC_STATUS) "lock manager out of room";
				*status_vector++ = isc_arg_end;
			}
			return NULL;
		}
	}

#ifdef DEV_BUILD
	/* This version of alloc() doesn't initialize memory.  To shake out
	   any bugs, in DEV_BUILD we initialize it to a "funny" pattern */
	memset((void*)SRQ_ABS_PTR(block), 0xFD, size);
#endif

	return (UCHAR*) SRQ_ABS_PTR(block);
}
static lbl* alloc_lock(USHORT length, ISC_STATUS* status_vector)
{
/**************************************
 *
 *	a l l o c _ l o c k
 *
 **************************************
 *
 * Functional description
 *	Allocate a lock for a key of a given length.  Look first to see
 *	if a spare of the right size is sitting around.  If not, allocate
 *	one.
 *
 **************************************/
	length = (length + 3) & ~3;

	ASSERT_ACQUIRED;
	srq* lock_srq;
	SRQ_LOOP(LOCK_header->lhb_free_locks, lock_srq) {
		lbl* lock = (lbl*) ((UCHAR *) lock_srq - OFFSET(lbl*, lbl_lhb_hash));
		if (lock->lbl_size == length) {
			remove_que(&lock->lbl_lhb_hash);
			lock->lbl_type = type_lbl;
			return lock;
		}
	}

	lbl* lock = (lbl*) alloc(sizeof(lbl) + length, status_vector);
	if (lock) {
		lock->lbl_size = length;
		lock->lbl_type = type_lbl;
	}

	/* NOTE: if the above alloc() fails do not release the mutex here but rather
	   release it in LOCK_enq() (as of now it is the only function that
	   calls alloc_lock()).  We need to hold the mutex to be able
	   to release a lock request block */

	return lock;
}
static void blocking_action(SRQ_PTR blocking_owner_offset,
							SRQ_PTR blocked_owner_offset)
{
/**************************************
 *
 *	b l o c k i n g _ a c t i o n
 *
 **************************************
 *
 * Functional description
 *	Fault handler for a blocking signal.  A blocking signal
 *	is an indication (albeit a strong one) that a blocking
 *	AST is pending for the owner.  Check in with the data
 *	structure for details.
 *	The re-post code in this routine assumes that no more
 *	than one thread of execution can be running in this
 *	routine at any time.
 *
 *	IMPORTANT: Before calling this routine, acquire() should
 *	have already been done.
 *
 *	Note that both a blocking owner offset and blocked owner
 *	offset are passed to this function.  This is for those
 *	cases where the owners are not the same.  If they are
 *	the same, then the blocked owner offset will be NULL.
 *
 **************************************/
	ASSERT_ACQUIRED;
	own* owner = (own*) SRQ_ABS_PTR(blocking_owner_offset);

	if (!blocked_owner_offset)
		blocked_owner_offset = blocking_owner_offset;

	while (owner->own_count) {
		srq* lock_srq = SRQ_NEXT(owner->own_blocks);
		if (lock_srq == &owner->own_blocks) {
			/* We've processed the own_blocks queue, reset the "we've been
			 * signaled" flag and start winding out of here
			 */
			owner->own_ast_flags &= ~OWN_signaled;
			/*post_history (his_leave_ast, blocking_owner_offset, 0, 0, true); */
			break;
		}

		lrq* request = (lrq*) ((UCHAR *) lock_srq - OFFSET(lrq*, lrq_own_blocks));
		lock_ast_t routine = request->lrq_ast_routine;
		void* arg = request->lrq_ast_argument;
		remove_que(&request->lrq_own_blocks);
		if (request->lrq_flags & LRQ_blocking) {
			request->lrq_flags &= ~LRQ_blocking;
			request->lrq_flags |= LRQ_blocking_seen;
			++LOCK_header->lhb_blocks;
			post_history(his_post_ast, blocking_owner_offset,
						 request->lrq_lock, SRQ_REL_PTR(request), true);
		}
		else if (request->lrq_flags & LRQ_repost) {
			request->lrq_type = type_null;
			insert_tail(&LOCK_header->lhb_free_requests,
						&request->lrq_lbl_requests);
		}

		if (routine) {
			release(blocked_owner_offset);
			(*routine)(arg);
			acquire(blocked_owner_offset);
			owner = (own*) SRQ_ABS_PTR(blocking_owner_offset);
		}
	}
}
#ifdef USE_BLOCKING_THREAD
static THREAD_ENTRY_DECLARE blocking_action_thread(THREAD_ENTRY_PARAM arg)
{
/**************************************
 *
 *	b l o c k i n g _ a c t i o n _ t h r e a d
 *
 **************************************
 *
 * Functional description
 *	Thread to handle blocking signals.
 *
 **************************************/
	SRQ_PTR* owner_offset_ptr = (SRQ_PTR*) arg;
	bool atStartup = true;

	AST_INIT();					/* Check into scheduler as AST thread */

	while (true) {
		AST_ENTER();

		/* See if main thread has requested us to go away */
		if (!*owner_offset_ptr ||
			LOCK_owner->own_process_id != LOCK_pid ||
			!LOCK_owner->own_owner_id)
		{
			if (atStartup)
			{
				startupSemaphore->release();
			}
			break;
		}

		const SLONG value = ISC_event_clear(&LOCK_owner->own_blocking);
		DEBUG_DELAY;
		acquire(*owner_offset_ptr);
		blocking_action(*owner_offset_ptr, (SRQ_PTR) NULL);
		release(*owner_offset_ptr);

		AST_EXIT();

		if (atStartup)
		{
			atStartup = false;
			startupSemaphore->release();
		}

		event_t* event_ptr = &LOCK_owner->own_blocking;
		ISC_event_wait(1, &event_ptr, &value, 0);
	}

	/* Main thread asked us to go away, do our cleanup, then tell
	 * main thread we're done (see shutdown_blocking_thread()).
	 */
	AST_EXIT();
	AST_FINI();					/* Check out of scheduler as AST thread */

	/* Wake up the main thread waiting for our exit. */
	/* Main thread won't wait forever, so check LOCK_owner is still mapped */
	if (LOCK_owner)
		cleanupSemaphore->release();

	return 0;
}
#endif

#ifdef DEV_BUILD
static void bug_assert(const TEXT* string, ULONG line)
{
/**************************************
 *
 *	b u g _ a s s e r t
 *
 **************************************
 *
 * Functional description
 *	Disastrous lock manager bug.  Issue message and abort process.
 *
 **************************************/
	TEXT buffer[MAXPATHLEN + 100];
	lhb LOCK_header_copy;

	sprintf((char *) buffer, "%s %"ULONGFORMAT": lock assertion failure: %.60s\n",
			__FILE__, line, string);

	/* Copy the shared memory so we can examine its state when we crashed */
	LOCK_header_copy = *LOCK_header;

	bug(NULL, buffer);			/* Never returns */
}
#endif
static void bug(ISC_STATUS* status_vector, const TEXT* string)
{
/**************************************
 *
 *	b u g
 *
 **************************************
 *
 * Functional description
 *	Disastrous lock manager bug.  Issue message and abort process.
 *
 **************************************/
	TEXT s[2 * MAXPATHLEN];

#ifdef WIN_NT
	sprintf(s, "Fatal lock manager error: %s, errno: %ld", string, ERRNO);
#else
	sprintf(s, "Fatal lock manager error: %s, errno: %d", string, ERRNO);
#endif
	gds__log(s);
	fprintf(stderr, "%s\n", s);

#if !(defined WIN_NT)
	/* The strerror() function returns the appropriate description string,
	   or an unknown error message if the error code is unknown. */
	fprintf(stderr, "--%s\n", strerror(errno));
#endif

	if (!LOCK_bugcheck++) {
#ifdef DEV_BUILD
#if !defined(WIN_NT)
		/* The lock file has some problem - copy it for later analysis */
		{
			TEXT buffer[2 * MAXPATHLEN];
			TEXT buffer2[2 * MAXPATHLEN];
			TEXT hostname[64];
			gds__prefix_lock(buffer, LOCK_FILE);
			const TEXT* lock_file = buffer;
			sprintf(buffer2, lock_file,
					ISC_get_host(hostname, sizeof(hostname)));
			sprintf(buffer, "cp %s isc_lock1.%d", buffer2, getpid());
			system(buffer);
		}
#endif /* WIN_NT */
#endif /* DEV_BUILD */

		/* If the current mutex acquirer is in the same process,
		   release the mutex */
		if (LOCK_header && (LOCK_header->lhb_active_owner > 0)) {
			const own* owner = (own*) SRQ_ABS_PTR(LOCK_header->lhb_active_owner);
			if (owner->own_process_id == LOCK_pid)
				release(LOCK_header->lhb_active_owner);
		}

		if (status_vector) {
			*status_vector++ = isc_arg_gds;
			*status_vector++ = isc_lockmanerr;
			*status_vector++ = isc_arg_gds;
			*status_vector++ = isc_random;
			*status_vector++ = isc_arg_string;
			*status_vector++ = (ISC_STATUS) string;
			*status_vector++ = isc_arg_end;
			return;
		}
	}

#ifdef DEV_BUILD
	/* Make a core drop - we want to LOOK at this failure! */
	abort();
#endif

	exit(FINI_ERROR);
}
static bool convert(SRQ_PTR request_offset,
					UCHAR type,
					SSHORT lck_wait,
					lock_ast_t ast_routine,
					void* ast_argument,
					ISC_STATUS* status_vector)
{
/**************************************
 *
 *	c o n v e r t
 *
 **************************************
 *
 * Functional description
 *	Perform a lock conversion, if possible.  If the lock cannot be
 *	granted immediately, either return immediately or wait depending
 *	on a wait flag.  If the lock is granted return true, otherwise
 *	return false.  Note: if the conversion would cause a deadlock,
 *	FALSE is returned even if wait was requested.
 *
 **************************************/
	ASSERT_ACQUIRED;
	lrq* request = get_request(request_offset);
	lbl* lock = (lbl*) SRQ_ABS_PTR(request->lrq_lock);
	SRQ_PTR owner_offset = request->lrq_owner;
	post_history(his_convert, owner_offset, request->lrq_lock, request_offset,
				 true);
	request->lrq_requested = type;
	request->lrq_flags &= ~LRQ_blocking_seen;

	/* Compute the state of the lock without the request. */

	--lock->lbl_counts[request->lrq_state];
	const UCHAR temp = lock_state(lock);

	/* If the requested lock level is compatible with the current state
	   of the lock, just grant the request.  Easy enough. */

	if (compatibility[type][temp])
	{
		request->lrq_ast_routine = ast_routine;
		request->lrq_ast_argument = ast_argument;
		grant(request, lock);
		post_pending(lock);
		release(owner_offset);
		return true;
	}

	++lock->lbl_counts[request->lrq_state];

	/* If we weren't requested to wait, just forget about the whole thing.
	   Otherwise wait for the request to be granted or rejected */

	if (lck_wait) {
		bool new_ast;
		if (request->lrq_ast_routine != ast_routine ||
			request->lrq_ast_argument != ast_argument)
		{
			new_ast = true;
		}
		else
			new_ast = false;

		if (wait_for_request(request, lck_wait, status_vector)) {
			ASSERT_RELEASED;
			return false;
		}

		request = (lrq*) SRQ_ABS_PTR(request_offset);
		if (!(request->lrq_flags & LRQ_rejected)) {
			if (new_ast) {
				acquire(owner_offset);
				request = (lrq*) SRQ_ABS_PTR(request_offset);
				request->lrq_ast_routine = ast_routine;
				request->lrq_ast_argument = ast_argument;
				release(owner_offset);
			}
			ASSERT_RELEASED;
			return true;
		}

		acquire(owner_offset);
		request = get_request(request_offset);
		lock = (lbl*) SRQ_ABS_PTR(request->lrq_lock);
		post_pending(lock);
	}

	request = (lrq*) SRQ_ABS_PTR(request_offset);
	request->lrq_requested = request->lrq_state;
	ASSERT_ACQUIRED;
	++LOCK_header->lhb_denies;
	if (lck_wait < 0)
		++LOCK_header->lhb_timeouts;

	release(owner_offset);

	*status_vector++ = isc_arg_gds;
	*status_vector++ = (lck_wait > 0) ? isc_deadlock :
		((lck_wait < 0) ? isc_lock_timeout : isc_lock_conflict);
	*status_vector++ = isc_arg_end;

	return false;
}
static bool create_owner(ISC_STATUS* status_vector,
LOCK_OWNER_T owner_id,
UCHAR owner_type,
2008-01-17 14:40:35 +01:00
SRQ_PTR* owner_handle)
2001-05-23 15:26:42 +02:00
{
/**************************************
*
* c r e a t e _ o w n e r
*
**************************************
*
* Functional description
* Create an owner block.
*
**************************************/
	if (LOCK_header->lhb_version != LHB_VERSION)
	{
		TEXT bug_buffer[BUFFER_TINY];
		sprintf(bug_buffer,
				"inconsistent lock table version number; found %d, expected %d",
				LOCK_header->lhb_version, LHB_VERSION);
		bug(status_vector, bug_buffer);
		return false;
	}

	acquire(DUMMY_OWNER_CREATE);	/* acquiring owner is being created */

	/* Look for a previous instance of the owner. If we find one, get rid of it. */
	srq* lock_srq;
	SRQ_LOOP(LOCK_header->lhb_owners, lock_srq)
	{
		own* owner = (own*) ((UCHAR *) lock_srq - OFFSET(own*, own_lhb_owners));
		if (owner->own_owner_id == owner_id &&
			(UCHAR) owner->own_owner_type == owner_type)
		{
			purge_owner(DUMMY_OWNER_CREATE, owner);	/* purging owner_offset has not been set yet */
			break;
		}
	}

	/* Allocate an owner block */
	own* owner = 0;
	if (SRQ_EMPTY(LOCK_header->lhb_free_owners))
	{
		if (!(owner = (own*) alloc(sizeof(own), status_vector)))
		{
			release_mutex();
			return false;
		}
	}
	else
	{
		owner = (own*) ((UCHAR *) SRQ_NEXT(LOCK_header->lhb_free_owners) -
						OFFSET(own*, own_lhb_owners));
		remove_que(&owner->own_lhb_owners);
	}

	init_owner_block(owner, owner_type, owner_id);

	/* cannot ASSERT_ACQUIRED; here - owner not set up yet */
	insert_tail(&LOCK_header->lhb_owners, &owner->own_lhb_owners);

#ifndef SUPERSERVER
	probe_owners(SRQ_REL_PTR(owner));
#endif

	*owner_handle = SRQ_REL_PTR(owner);
	LOCK_header->lhb_active_owner = *owner_handle;

#ifdef VALIDATE_LOCK_TABLE
	validate_lhb(LOCK_header);
#endif

	release(*owner_handle);

	return true;
}

#ifdef DEV_BUILD
static void current_is_active_owner(bool expect_acquired, ULONG line)
{
/**************************************
 *
 *	c u r r e n t _ i s _ a c t i v e _ o w n e r
 *
 **************************************
 *
 * Functional description
 *	Decide if the current process is the active owner
 *	for the lock table. Used in assertion checks.
 *
 **************************************/

	/* Do not ASSERT_ACQUIRED in this routine */

	/* If there's no header, we must be setting up in init somewhere */
	if (!LOCK_header)
		return;

	/* Use a local copy of lhb_active_owner. We're viewing the lock table
	   without having acquired it in the "expect_acquired false" case, so it
	   can change out from under us. We don't care that it changes, but
	   if it gets set to DUMMY_OWNER_CREATE or DUMMY_OWNER_DELETE
	   it can lead to a core dump when we try to map the owner pointer */
	SRQ_PTR owner_ptr = LOCK_header->lhb_active_owner;

	/* If no active owner, then we certainly aren't the active owner */
	if (!owner_ptr) {
		if (!expect_acquired)
			return;
		bug_assert("not acquired", line);
	}

	/* When creating or deleting an owner the owner offset is set such that
	 * we can't be sure if WE have the lock table acquired, or someone else
	 * has it, and they just happen to be doing the same operation. So, we
	 * don't try to report any problems when the lock table is in that state.
	 */

	/* If active owner is DUMMY_OWNER_CREATE, then we're creating a new owner */
	if (owner_ptr == DUMMY_OWNER_CREATE)
		return;

	/* If active owner is DUMMY_OWNER_DELETE, then we're deleting an owner */
	if (owner_ptr == DUMMY_OWNER_DELETE)
		return;

	/* Find the active owner, and see if it is us */
	own* owner = (own*) SRQ_ABS_PTR(owner_ptr);

	/* SUPERSERVER uses the same pid for all threads, so the tests
	   below are of limited utility and can cause bogus errors */
#ifndef SUPERSERVER
	own owner_copy;
	if (owner->own_process_id == LOCK_pid) {
		if (expect_acquired)
			return;
		/* Save a copy for the debugger before we abort */
		memcpy(&owner_copy, owner, sizeof(owner_copy));
		bug_assert("not acquired", line);
	}
	else {
		if (!expect_acquired)
			return;
		/* Save a copy for the debugger before we abort */
		memcpy(&owner_copy, owner, sizeof(owner_copy));
		bug_assert("not released", line);
	}
#endif // SUPERSERVER
}
#endif // DEV_BUILD

static void deadlock_clear()
{
/**************************************
 *
 *	d e a d l o c k _ c l e a r
 *
 **************************************
 *
 * Functional description
 *	Clear deadlock and scanned bits for pending requests
 *	in preparation for a deadlock scan.
 *
 **************************************/
	ASSERT_ACQUIRED;
	srq* lock_srq;
	SRQ_LOOP(LOCK_header->lhb_owners, lock_srq) {
		own* owner = (own*) ((UCHAR *) lock_srq - OFFSET(own*, own_lhb_owners));
		SRQ_PTR pending_offset = owner->own_pending_request;
		if (!pending_offset)
			continue;
		lrq* pending = (lrq*) SRQ_ABS_PTR(pending_offset);
		pending->lrq_flags &= ~(LRQ_deadlock | LRQ_scanned);
	}
}

static lrq* deadlock_scan(own* owner, lrq* request)
{
/**************************************
 *
 *	d e a d l o c k _ s c a n
 *
 **************************************
 *
 * Functional description
 *	Given an owner block that has been stalled for some time, find
 *	a deadlock cycle if there is one. If a deadlock is found, return
 *	the address of a pending lock request in the deadlock request.
 *	If no deadlock is found, return null.
 *
 **************************************/
	LOCK_TRACE(
		("deadlock_scan: owner %ld request %ld\n", SRQ_REL_PTR(owner),
		 SRQ_REL_PTR(request)));

	ASSERT_ACQUIRED;
	++LOCK_header->lhb_scans;
	post_history(his_scan, request->lrq_owner, request->lrq_lock,
				 SRQ_REL_PTR(request), true);
	deadlock_clear();

#ifdef VALIDATE_LOCK_TABLE
	validate_lhb(LOCK_header);
#endif

	bool maybe_deadlock = false;
	lrq* victim = deadlock_walk(request, &maybe_deadlock);

	/* Only when it is certain that this request is not part of a deadlock do we
	   mark this request as 'scanned' so that we will not check this request again.
	   Note that this request might be part of multiple deadlocks. */

	if (!victim && !maybe_deadlock)
		owner->own_flags |= OWN_scanned;
#ifdef DEBUG
	else if (!victim && maybe_deadlock)
		DEBUG_MSG(0, ("deadlock_scan: not marking due to maybe_deadlock\n"));
#endif

	return victim;
}

static lrq* deadlock_walk(lrq* request, bool* maybe_deadlock)
{
/**************************************
 *
 *	d e a d l o c k _ w a l k
 *
 **************************************
 *
 * Functional description
 *	Given a request that is waiting, determine whether a deadlock has
 *	occurred.
 *
 **************************************/
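/* In graph terms this is a depth-first search of the wait-for graph
   (a sketch of the idea, not a formal spec): LRQ_deadlock acts as the
   "currently on the DFS stack" color, so revisiting such a request means
   we have closed a cycle; LRQ_scanned is the "fully explored, provably
   not in a cycle" color, letting later scans skip the whole subtree. */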
	/* If this request was scanned for deadlock earlier, don't
	   visit it again. */
	if (request->lrq_flags & LRQ_scanned)
		return NULL;

	/* If this request has been seen already during this deadlock-walk, then we
	   detected a circle in the wait-for graph. Return "deadlock". */
	if (request->lrq_flags & LRQ_deadlock)
		return request;

	/* Remember that this request is part of the wait-for graph. */
	request->lrq_flags |= LRQ_deadlock;

	/* Check if this is a conversion request. */
	const bool conversion = (request->lrq_state > LCK_null);

	/* Find the parent lock of the request */
	lbl* lock = (lbl*) SRQ_ABS_PTR(request->lrq_lock);

	/* Loop thru the requests granted against the lock. If any granted request is
	   blocking the request we're handling, recurse to find what's blocking him. */
	srq* lock_srq;
	SRQ_LOOP(lock->lbl_requests, lock_srq) {
		lrq* block = (lrq*) ((UCHAR *) lock_srq - OFFSET(lrq*, lrq_lbl_requests));

		if (!lockOrdering() || conversion) {
			/* Don't pursue our own lock-request again. */
			if (request == block)
				continue;

			/* Since lock conversions can't follow the fairness rules (to avoid
			   deadlocks), only granted lock requests need to be examined. */
			/* If lock-ordering is turned off (opening the door for starvation),
			   only granted requests can block our request. */
			if (compatibility[request->lrq_requested][block->lrq_state])
				continue;
		}
		else {
			/* Don't pursue our own lock-request again. In addition, don't look
			   at requests that arrived after our request because lock-ordering
			   is in effect. */
			if (request == block)
				break;

			/* Since lock ordering is in effect, granted locks and waiting
			   requests that arrived before our request could block us. */
			if (compatibility
				[request->lrq_requested][MAX(block->lrq_state, block->lrq_requested)])
			{
				continue;
			}
		}

		/* Don't pursue lock owners that are not blocked themselves
		   (they can't cause a deadlock). */
		own* owner = (own*) SRQ_ABS_PTR(block->lrq_owner);

		/* Don't pursue lock owners that still have to finish processing their AST.
		   If the blocking queue is not empty, then the owner still has some
		   AST's to process (or lock reposts).
		   Remember this fact because they still might be part of a deadlock. */
		if (owner->own_ast_flags & OWN_signaled ||
			!SRQ_EMPTY((owner->own_blocks)))
		{
			*maybe_deadlock = true;
			continue;
		}

		/* YYY: Note: can the below code be moved to the
		   start of this block? Before the OWN_signaled check? */

		/* Get pointer to the waiting request whose owner also owns a lock
		   that blocks the input request. */
		const SRQ_PTR pending_offset = owner->own_pending_request;
		if (!pending_offset)
			continue;

		lrq* target = (lrq*) SRQ_ABS_PTR(pending_offset);

		/* If this waiting request is not pending anymore, then things are changing
		   and this request cannot be part of a deadlock. */
		if (!(target->lrq_flags & LRQ_pending))
			continue;

		/* Check who is blocking the request whose owner is blocking the input
		   request. */
		if ((target = deadlock_walk(target, maybe_deadlock)))
			return target;
	}

	/* This branch of the wait-for graph is exhausted, the current waiting
	   request is not part of a deadlock. */
	request->lrq_flags &= ~LRQ_deadlock;
	request->lrq_flags |= LRQ_scanned;
	return NULL;
}

static void dequeue(SRQ_PTR request_offset)
{
/**************************************
 *
 *	d e q u e u e
 *
 **************************************
 *
 * Functional description
 *	Release an outstanding lock.
 *
 **************************************/

	/* Acquire the data structure, and compute addresses of both lock
	   request and lock */
	lrq* request = get_request(request_offset);
	post_history(his_deq, request->lrq_owner, request->lrq_lock,
				 request_offset, true);
	request->lrq_ast_routine = NULL;
	release_request(request);
}

#ifdef DEBUG
static ULONG delay_count = 0;
static ULONG last_signal_line = 0;
static ULONG last_delay_line = 0;

static void debug_delay(ULONG lineno)
{
/**************************************
 *
 *	d e b u g _ d e l a y
 *
 **************************************
 *
 * Functional description
 *	This is a debugging routine, the purpose of which is to slow
 *	down operations in order to expose windows of critical
 *	sections.
 *
 **************************************/
	ULONG i;

	/* Delay for a while */
	last_delay_line = lineno;
	for (i = 0; i < 10000; i++)
		/* Nothing */ ;

	/* Occasionally crash for robustness testing */
	/*
	if ((delay_count % 500) == 0)
		exit (-1);
	*/

	for (i = 0; i < 10000; i++)
		/* Nothing */ ;
}
#endif

static void exit_handler(void* arg)
{
/**************************************
 *
 *	e x i t _ h a n d l e r
 *
 **************************************
 *
 * Functional description
 *	Release the process block, any outstanding locks,
 *	and unmap the lock manager. This is usually called
 *	by the cleanup handler.
 *
 **************************************/
	ISC_STATUS_ARRAY local_status;

	if (!LOCK_header) {
		return;
	}

#ifndef SUPERSERVER
	/* For a superserver (e.g. Netware), since the server is going away,
	   the semaphore cleanups are also happening; things are in a flux;
	   all the threads are going away -- so don't get into any trouble
	   by doing purge_owner() below. */

	/* Get rid of all the owners belonging to the current process */
	SRQ_PTR owner_offset = LOCK_owner_offset;
	if (owner_offset) {
#ifdef USE_BLOCKING_THREAD
		shutdown_blocking_thread(local_status);
#else
#if defined HAVE_MMAP || defined WIN_NT
		if (LOCK_owner) {
			ISC_unmap_object(local_status, &LOCK_data,
							 (UCHAR**) &LOCK_owner, sizeof(own));
		}
#endif
#endif
		if (owner_offset != LOCK_header->lhb_active_owner)
			acquire(DUMMY_OWNER_DELETE);

		srq* lock_srq;
		SRQ_LOOP(LOCK_header->lhb_owners, lock_srq) {
			own* owner = (own*) ((UCHAR *) lock_srq - OFFSET(own*, own_lhb_owners));
			if (owner->own_process_id == LOCK_pid) {
				lock_srq = (SRQ) SRQ_ABS_PTR(lock_srq->srq_backward);
				purge_owner(SRQ_REL_PTR(owner), owner);
				break;
			}
		}

		release_mutex();
		LOCK_owner_offset = 0;
	}
#endif

	ISC_unmap_file(local_status, &LOCK_data, 0);
}

static lbl* find_lock(SRQ_PTR parent,
					  USHORT series,
					  const UCHAR* value,
					  USHORT length,
					  USHORT* slot)
{
/**************************************
 *
 *	f i n d _ l o c k
 *
 **************************************
 *
 * Functional description
 *	Find a lock block given a resource
 *	name. If it doesn't exist, the hash
 *	slot will be useful for enqueuing a
 *	lock.
 *
 **************************************/
	/* Hash the value preserving its distribution as much as possible */
	ULONG hash_value = 0;
	{ // scope
		UCHAR* p = NULL;	// silence uninitialized warning
		const UCHAR* q = value;
		for (USHORT l = 0; l < length; l++) {
			if (!(l & 3))
				p = (UCHAR *) &hash_value;
			*p++ += *q++;
		}
	} // scope
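	/* For illustration (not part of the algorithm's contract): the loop
	   folds the key into the 4 bytes of hash_value round-robin, so for a
	   key "ABCD" byte 0 accumulates 'A', byte 1 'B', byte 2 'C', byte 3
	   'D', and a 5th byte 'E' would wrap around and add into byte 0. */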

	/* See if the lock already exists */
	const USHORT hash_slot = *slot = (USHORT) (hash_value % LOCK_header->lhb_hash_slots);
	ASSERT_ACQUIRED;
	srq* hash_header = &LOCK_header->lhb_hash[hash_slot];

	for (srq* lock_srq = (SRQ) SRQ_ABS_PTR(hash_header->srq_forward);
		 lock_srq != hash_header; lock_srq = (SRQ) SRQ_ABS_PTR(lock_srq->srq_forward))
	{
		lbl* lock = (lbl*) ((UCHAR *) lock_srq - OFFSET(lbl*, lbl_lhb_hash));
		if (lock->lbl_series != series ||
			lock->lbl_length != length || lock->lbl_parent != parent)
		{
			continue;
		}

		if (!length || !memcmp(value, lock->lbl_key, length))
			return lock;
	}

	return NULL;
}

static lrq* get_request(SRQ_PTR offset)
{
/**************************************
 *
 *	g e t _ r e q u e s t
 *
 **************************************
 *
 * Functional description
 *	Locate and validate user supplied request offset.
 *
 **************************************/
	TEXT s[32];

	lrq* request = (lrq*) SRQ_ABS_PTR(offset);
	if (offset == -1 || request->lrq_type != type_lrq) {
		sprintf(s, "invalid lock id (%"SLONGFORMAT")", offset);
		bug(NULL, s);
	}

	const lbl* lock = (lbl*) SRQ_ABS_PTR(request->lrq_lock);
	if (lock->lbl_type != type_lbl) {
		sprintf(s, "invalid lock (%"SLONGFORMAT")", offset);
		bug(NULL, s);
	}

	return request;
}

static void grant(lrq* request, lbl* lock)
{
/**************************************
 *
 *	g r a n t
 *
 **************************************
 *
 * Functional description
 *	Grant a lock request. If the lock is a conversion, assume the caller
 *	has already decremented the former lock type count in the lock block.
 *
 **************************************/

	/* Request must be for THIS lock */
	CHECK(SRQ_REL_PTR(lock) == request->lrq_lock);

	post_history(his_grant, request->lrq_owner, request->lrq_lock,
				 SRQ_REL_PTR(request), true);

	++lock->lbl_counts[request->lrq_requested];
	request->lrq_state = request->lrq_requested;
	if (request->lrq_data) {
		remove_que(&lock->lbl_lhb_data);
		if ((lock->lbl_data = request->lrq_data))
			insert_data_que(lock);
		request->lrq_data = 0;
	}

	lock->lbl_state = lock_state(lock);
	if (request->lrq_flags & LRQ_pending)
	{
		request->lrq_flags &= ~LRQ_pending;
		lock->lbl_pending_lrq_count--;
	}

	post_wakeup((own*) SRQ_ABS_PTR(request->lrq_owner));
}

static SRQ_PTR grant_or_que(lrq* request, lbl* lock, SSHORT lck_wait)
{
/**************************************
 *
 *	g r a n t _ o r _ q u e
 *
 **************************************
 *
 * Functional description
 *	There is a request against an existing lock. If the request
 *	is compatible with the lock, grant it. Otherwise queue it.
 *	If the request is queued, set up the machinery to do a deadlock
 *	scan in a while.
 *
 **************************************/
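/* A rough sketch of the caller's contract (illustrative only, derived
   from the code below): the lock table must be acquired on entry, and it
   has been released by the time we return. A non-zero return is the
   request offset (granted, or resolved after a wait); zero means the
   request was rejected and already released, e.g.

		SRQ_PTR handle = grant_or_que(request, lock, lck_wait);
		if (!handle)
			; // the caller reports the conflict/deadlock/timeout
*/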
	const SRQ_PTR request_offset = SRQ_REL_PTR(request);
	request->lrq_lock = SRQ_REL_PTR(lock);

	/* Compatible requests are easy to satisfy. Just post the request
	   to the lock, update the lock state, release the data structure,
	   and we're done. */
	if (compatibility[request->lrq_requested][lock->lbl_state]) {
		if (!lockOrdering() ||
			request->lrq_requested == LCK_null ||
			(lock->lbl_pending_lrq_count == 0))
		{
			grant(request, lock);
			post_pending(lock);
			release(request->lrq_owner);
			return request_offset;
		}
	}

	/* The request isn't compatible with the current state of the lock.
	 * If we haven't been asked to wait for the lock, return now.
	 */
	if (lck_wait)
	{
		wait_for_request(request, lck_wait, NULL);

		/* For performance reasons, we're going to look at the
		 * request's status without re-acquiring the lock table.
		 * This is safe as no-one can take away the request, once
		 * granted, and we're doing a read-only access
		 */
		request = (lrq*) SRQ_ABS_PTR(request_offset);

		/* Request HAS been resolved */
		CHECK(!(request->lrq_flags & LRQ_pending));

		if (!(request->lrq_flags & LRQ_rejected))
			return request_offset;
		acquire(request->lrq_owner);
	}

	request = (lrq*) SRQ_ABS_PTR(request_offset);
	post_history(his_deny, request->lrq_owner, request->lrq_lock,
				 SRQ_REL_PTR(request), true);
	ASSERT_ACQUIRED;
	++LOCK_header->lhb_denies;
	if (lck_wait < 0)
		++LOCK_header->lhb_timeouts;
	const SRQ_PTR owner_offset = request->lrq_owner;
	release_request(request);
	release(owner_offset);

	return (SRQ_PTR) 0;
}

static bool init_lock_table(ISC_STATUS* status_vector)
{
/**************************************
 *
 *	i n i t _ l o c k _ t a b l e
 *
 **************************************
 *
 * Functional description
 *	Initialize the global lock table for the first time.
 *	Read the config file, map the shared region, etc.
 *
 **************************************/
	LOCK_pid = getpid();

	TEXT buffer[MAXPATHLEN];
	gds__prefix_lock(buffer, LOCK_FILE);
	TEXT* lock_file = buffer;

	if (!(LOCK_header = (lhb*) ISC_map_file(status_vector, lock_file,
											lock_initialize, 0,
											Config::getLockMemSize(),
											&LOCK_data)))
	{
		return false;
	}

	gds__register_cleanup(exit_handler, 0);

	return true;
}

static void init_owner_block(own* owner,
							 UCHAR owner_type,
							 LOCK_OWNER_T owner_id)
{
/**************************************
 *
 *	i n i t _ o w n e r _ b l o c k
 *
 **************************************
 *
 * Functional description
 *	Initialize the passed owner block nice and new.
 *
 **************************************/
	owner->own_type = type_own;
	owner->own_owner_type = owner_type;
	owner->own_flags = 0;
	owner->own_ast_flags = 0;
	owner->own_ast_hung_flags = 0;
	owner->own_count = 1;
	owner->own_owner_id = owner_id;
	SRQ_INIT(owner->own_lhb_owners);
	SRQ_INIT(owner->own_requests);
	SRQ_INIT(owner->own_blocks);
	owner->own_pending_request = 0;
	owner->own_process_id = LOCK_pid;
	owner->own_acquire_time = 0;

#ifdef USE_BLOCKING_THREAD
	ISC_event_init(&owner->own_blocking, 0, BLOCKING_SIGNAL);
#endif

#ifdef PREVENT_OWNER_STARVATION
	ISC_event_init(&owner->own_stall, 0, STALL_SIGNAL);
#endif

	ISC_event_init(&owner->own_wakeup, 0, WAKEUP_SIGNAL);
}

static void lock_initialize(void* arg, SH_MEM shmem_data, bool initialize)
{
/**************************************
 *
 *	l o c k _ i n i t i a l i z e
 *
 **************************************
 *
 * Functional description
 *	Initialize the lock header block. The caller is assumed
 *	to have an exclusive lock on the lock file.
 *
 **************************************/
#ifdef WIN_NT
	char buffer[MAXPATHLEN];
	gds__prefix_lock(buffer, LOCK_FILE);
	if (ISC_mutex_init(MUTEX, buffer)) {
		bug(NULL, "mutex init failed");
	}
#endif

	LOCK_header = (lhb*) shmem_data->sh_mem_address;

	if (!initialize) {
		return;
	}

	memset(LOCK_header, 0, sizeof(lhb));
	LOCK_header->lhb_type = type_lhb;
	LOCK_header->lhb_version = LHB_VERSION;

	/* Mark ourselves as active owner to prevent fb_assert() checks */
	LOCK_header->lhb_active_owner = DUMMY_OWNER_CREATE;	/* In init of lock system */

	SRQ_INIT(LOCK_header->lhb_owners);
	SRQ_INIT(LOCK_header->lhb_free_owners);
	SRQ_INIT(LOCK_header->lhb_free_locks);
	SRQ_INIT(LOCK_header->lhb_free_requests);

#ifndef WIN_NT
	if (ISC_mutex_init(MUTEX)) {
		bug(NULL, "mutex init failed");
	}
#endif

	int hash_slots = Config::getLockHashSlots();
	if (hash_slots < HASH_MIN_SLOTS)
		hash_slots = HASH_MIN_SLOTS;
	if (hash_slots > HASH_MAX_SLOTS)
		hash_slots = HASH_MAX_SLOTS;

	LOCK_header->lhb_hash_slots = (USHORT) hash_slots;
	LOCK_header->lhb_scan_interval = Config::getDeadlockTimeout();
	LOCK_header->lhb_acquire_spins = Config::getLockAcquireSpins();

	/* Initialize lock series data queues and lock hash chains. */
	USHORT i;
	SRQ lock_srq;
	for (i = 0, lock_srq = LOCK_header->lhb_data; i < LCK_MAX_SERIES; i++, lock_srq++)
	{
		SRQ_INIT((*lock_srq));
	}
	for (i = 0, lock_srq = LOCK_header->lhb_hash; i < LOCK_header->lhb_hash_slots;
		 i++, lock_srq++)
	{
		SRQ_INIT((*lock_srq));
	}

	/* Set lock_ordering flag for the first time */
	if (Config::getLockGrantOrder())
		LOCK_header->lhb_flags |= LHB_lock_ordering;

	const SLONG length =
		sizeof(lhb) +
		(LOCK_header->lhb_hash_slots * sizeof(LOCK_header->lhb_hash[0]));
	LOCK_header->lhb_length = shmem_data->sh_mem_length_mapped;
	LOCK_header->lhb_used = FB_ALIGN(length, ALIGNMENT);

	shb* secondary_header = (shb*) alloc(sizeof(shb), NULL);
	if (!secondary_header)
	{
		gds__log("Fatal lock manager error: lock manager out of room");
		exit(STARTUP_ERROR);
	}

	LOCK_header->lhb_secondary = SRQ_REL_PTR(secondary_header);
	secondary_header->shb_type = type_shb;
	secondary_header->shb_remove_node = 0;
	secondary_header->shb_insert_que = 0;
	secondary_header->shb_insert_prior = 0;

	/* Allocate a sufficiency of history blocks */
	his* history;
	for (USHORT j = 0; j < 2; j++)
	{
		SRQ_PTR* prior = (j == 0) ?
			&LOCK_header->lhb_history : &secondary_header->shb_history;

		for (i = 0; i < HISTORY_BLOCKS; i++)
		{
			if (!(history = (his*) alloc(sizeof(his), NULL)))
			{
				gds__log("Fatal lock manager error: lock manager out of room");
				exit(STARTUP_ERROR);
			}
			*prior = SRQ_REL_PTR(history);
			history->his_type = type_his;
			history->his_operation = 0;
			prior = &history->his_next;
		}

		history->his_next =
			(j == 0) ? LOCK_header->lhb_history : secondary_header->shb_history;
	}

	/* Done initializing, unmark owner information */
	LOCK_header->lhb_active_owner = 0;
}

static void insert_data_que(lbl* lock)
{
/**************************************
 *
 *	i n s e r t _ d a t a _ q u e
 *
 **************************************
 *
 * Functional description
 *	Insert a node in the lock series data queue
 *	in sorted (ascending) order by lock data.
 *
 **************************************/
	if (lock->lbl_series < LCK_MAX_SERIES && lock->lbl_parent
		&& lock->lbl_data)
	{
		SRQ data_header = &LOCK_header->lhb_data[lock->lbl_series];

		SRQ lock_srq;
		for (lock_srq = (SRQ) SRQ_ABS_PTR(data_header->srq_forward);
			 lock_srq != data_header; lock_srq = (SRQ) SRQ_ABS_PTR(lock_srq->srq_forward))
		{
			lbl* lock2 = (lbl*) ((UCHAR *) lock_srq - OFFSET(lbl*, lbl_lhb_data));
			CHECK(lock2->lbl_series == lock->lbl_series);
			if (lock2->lbl_parent != lock->lbl_parent)
				continue;

			if (lock->lbl_data <= lock2->lbl_data)
				break;
		}

		insert_tail(lock_srq, &lock->lbl_lhb_data);
	}
}

static void insert_tail(SRQ lock_srq, SRQ node)
{
/**************************************
 *
 *	i n s e r t _ t a i l
 *
 **************************************
 *
 * Functional description
 *	Insert a node at the tail of a queue.
 *
 *	To handle the event of the process terminating during
 *	the insertion of the node, we set values in the shb to
 *	indicate the node being inserted.
 *	Then, should we be unable to complete
 *	the node insert, the next process into the lock table
 *	will notice the uncompleted work and undo it,
 *	eg: it will put the queue back to the state
 *	prior to the insertion being started.
 *
 **************************************/
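/* An illustrative timeline of the journaling protocol (a sketch, not a
   formal proof): shb_insert_que and shb_insert_prior are recorded before
   any link is touched, so a crash at any later point leaves the next
   process enough state to either find a fully linked node or restore the
   recorded queue head's srq_forward/srq_backward to their pre-insert
   values; the queue never stays half-linked. */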
	ASSERT_ACQUIRED;
	shb* recover = (shb*) SRQ_ABS_PTR(LOCK_header->lhb_secondary);
	DEBUG_DELAY;
	recover->shb_insert_que = SRQ_REL_PTR(lock_srq);
	DEBUG_DELAY;
	recover->shb_insert_prior = lock_srq->srq_backward;
	DEBUG_DELAY;

	node->srq_forward = SRQ_REL_PTR(lock_srq);
	DEBUG_DELAY;
	node->srq_backward = lock_srq->srq_backward;
	DEBUG_DELAY;

	SRQ prior = (SRQ) SRQ_ABS_PTR(lock_srq->srq_backward);
	DEBUG_DELAY;
	prior->srq_forward = SRQ_REL_PTR(node);
	DEBUG_DELAY;
	lock_srq->srq_backward = SRQ_REL_PTR(node);
	DEBUG_DELAY;

	recover->shb_insert_que = 0;
	DEBUG_DELAY;
	recover->shb_insert_prior = 0;
	DEBUG_DELAY;
}

static USHORT lock_state(lbl* lock)
{
/**************************************
 *
 *	l o c k _ s t a t e
 *
 **************************************
 *
 * Functional description
 *	Compute the current state of a lock.
 *
 **************************************/
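	/* The checks below run from strongest to weakest mode (EX, PW, SW,
	   PR, SR, null), so the value returned is the most restrictive mode
	   currently granted; e.g. one LCK_PW holder plus three LCK_SR holders
	   yields LCK_PW. (Illustrative reading of the cascade below.) */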
	if (lock->lbl_counts[LCK_EX])
		return LCK_EX;
	if (lock->lbl_counts[LCK_PW])
		return LCK_PW;
	if (lock->lbl_counts[LCK_SW])
		return LCK_SW;
	if (lock->lbl_counts[LCK_PR])
		return LCK_PR;
	if (lock->lbl_counts[LCK_SR])
		return LCK_SR;
	if (lock->lbl_counts[LCK_null])
		return LCK_null;

	return LCK_none;
}

static void post_blockage(lrq* request,
						   lbl* lock,
						   bool force)
{
/**************************************
 *
 *	p o s t _ b l o c k a g e
 *
 **************************************
 *
 * Functional description
 *	The current request is blocked. Post blocking notices to
 *	any process blocking the request.
 *
 **************************************/
	own* owner = (own*) SRQ_ABS_PTR(request->lrq_owner);

	ASSERT_ACQUIRED;
	CHECK(owner->own_pending_request == SRQ_REL_PTR(request));
	CHECK(request->lrq_flags & LRQ_pending);

	SRQ_PTR next_que_offset;
	for (SRQ lock_srq = SRQ_NEXT(lock->lbl_requests); lock_srq != &lock->lbl_requests;
		 lock_srq = (SRQ) SRQ_ABS_PTR(next_que_offset))
	{
		SRQ next_que = SRQ_NEXT((*lock_srq));
		next_que_offset = SRQ_REL_PTR(next_que);
		lrq* block = (lrq*) ((UCHAR *) lock_srq - OFFSET(lrq*, lrq_lbl_requests));

		/* Figure out if this lock request is blocking our own lock request.
		   Of course, our own request cannot block ourselves. Compatible
		   requests don't block us, and if there is no AST routine for the
		   request the block doesn't matter as we can't notify anyone.
		   If the owner has marked the request with LRQ_blocking_seen,
		   then the blocking AST has been delivered and the owner promises
		   to release the lock as soon as possible (so don't bug the owner). */
		if (block == request ||
			compatibility[request->lrq_requested][block->lrq_state] ||
			!block->lrq_ast_routine ||
			((block->lrq_flags & LRQ_blocking_seen) && !force))
		{
			continue;
		}

		own* blocking_owner = (own*) SRQ_ABS_PTR(block->lrq_owner);

		/* Add the blocking request to the list of blocks if it's not
		   there already (LRQ_blocking) */
		if (!(block->lrq_flags & LRQ_blocking)) {
			insert_tail(&blocking_owner->own_blocks, &block->lrq_own_blocks);
			block->lrq_flags |= LRQ_blocking;
			block->lrq_flags &= ~LRQ_blocking_seen;
		}

		if (force) {
			blocking_owner->own_ast_flags &= ~OWN_signaled;
		}

		if (blocking_owner != owner &&
			!signal_owner(blocking_owner, SRQ_REL_PTR(owner)))
		{
			/* We can't signal the blocking_owner, assume he has died
			   and purge him out */
			lock_srq = (SRQ) SRQ_ABS_PTR(lock_srq->srq_backward);
			purge_owner(SRQ_REL_PTR(owner), blocking_owner);
		}

		if (block->lrq_state == LCK_EX)
			break;
	}
}

static void post_history(USHORT operation,
						 SRQ_PTR process,
						 SRQ_PTR lock,
						 SRQ_PTR request,
						 bool old_version)
{
/**************************************
 *
 *	p o s t _ h i s t o r y
 *
 **************************************
 *
 * Functional description
 *	Post a history item.
 *
 **************************************/
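	/* The history blocks form fixed-size circular lists built in
	   lock_initialize(), so posting an item just advances the list head
	   and overwrites the oldest entry; each list retains the
	   HISTORY_BLOCKS most recent events. (Descriptive note on the
	   scheme, not new behaviour.) */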
	his* history;
	if (old_version) {
		history = (his*) SRQ_ABS_PTR(LOCK_header->lhb_history);
		ASSERT_ACQUIRED;
		LOCK_header->lhb_history = history->his_next;
	}
	else {
		ASSERT_ACQUIRED;
		shb* recover = (shb*) SRQ_ABS_PTR(LOCK_header->lhb_secondary);
		history = (his*) SRQ_ABS_PTR(recover->shb_history);
		recover->shb_history = history->his_next;
	}

	history->his_operation = operation;
	history->his_process = process;
	history->his_lock = lock;
	history->his_request = request;
}

static void post_pending(lbl* lock)
{
/**************************************
 *
 *	p o s t _ p e n d i n g
 *
 **************************************
 *
 * Functional description
 *	There has been a change in state of a lock. Check pending
 *	requests to see if something can be granted. If so, do it.
 *
 **************************************/
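	/* For a pending conversion, the compatibility test below works by
	   briefly taking the request's own granted state out of the picture
	   (a sketch of the idea): decrement lbl_counts[old state], recompute
	   the lock state as if we weren't there, test the requested mode
	   against that, and restore the count if the conversion still cannot
	   be granted. */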
#ifdef DEV_BUILD
	USHORT pending_counter = 0;
#endif
	if (lock->lbl_pending_lrq_count == 0)
		return;

	/* Loop thru granted requests looking for pending conversions. If one
	   is found, check to see if it can be granted. Even if a request cannot
	   be granted for compatibility reasons, post_wakeup() that owner so that
	   it can post_blockage() to the newly granted owner of the lock. */
	SRQ lock_srq;
	SRQ_LOOP(lock->lbl_requests, lock_srq) {
		lrq* request = (lrq*) ((UCHAR *) lock_srq - OFFSET(lrq*, lrq_lbl_requests));
		if (!(request->lrq_flags & LRQ_pending))
			continue;

		if (request->lrq_state)
		{
			--lock->lbl_counts[request->lrq_state];
			const UCHAR temp_state = lock_state(lock);
			if (compatibility[request->lrq_requested][temp_state])
				grant(request, lock);
			else {
#ifdef DEV_BUILD
				pending_counter++;
#endif
				++lock->lbl_counts[request->lrq_state];
				own* owner = (own*) SRQ_ABS_PTR(request->lrq_owner);
				post_wakeup(owner);
				if (lockOrdering()) {
					CHECK(lock->lbl_pending_lrq_count >= pending_counter);
					return;
				}
			}
		}
		else if (compatibility[request->lrq_requested][lock->lbl_state])
			grant(request, lock);
		else {
#ifdef DEV_BUILD
			pending_counter++;
#endif
			own* owner = (own*) SRQ_ABS_PTR(request->lrq_owner);
			post_wakeup(owner);
			if (lockOrdering()) {
				CHECK(lock->lbl_pending_lrq_count >= pending_counter);
				return;
			}
		}
	}

	CHECK(lock->lbl_pending_lrq_count == pending_counter);
}

static void post_wakeup(own* owner)
{
/**************************************
 *
 *	p o s t _ w a k e u p
 *
 **************************************
 *
 * Functional description
 *	Wakeup whoever is waiting on a lock.
 *
 **************************************/
	if (owner->own_flags & OWN_waiting)
	{
		++LOCK_header->lhb_wakeups;
		owner->own_flags |= OWN_wakeup;
		ISC_event_post(&owner->own_wakeup);
	}
}

#ifndef SUPERSERVER
static bool probe_owners(SRQ_PTR probing_owner_offset)
{
/**************************************
 *
 *	p r o b e _ o w n e r s
 *
 **************************************
 *
 * Functional description
 *	Probe the owners to see if they still exist. If an owner's
 *	process has died, get rid of that owner.
 *
 **************************************/
	bool purged = false;

	ASSERT_ACQUIRED;
	SRQ lock_srq;
	SRQ_LOOP(LOCK_header->lhb_owners, lock_srq) {
		own* owner = (own*) ((UCHAR *) lock_srq - OFFSET(own*, own_lhb_owners));
		if (owner->own_flags & OWN_signal)
			signal_owner(owner, (SRQ_PTR) NULL);
		if (owner->own_process_id != LOCK_pid &&
			!ISC_check_process_existence(owner->own_process_id, false))
		{
			lock_srq = (SRQ) SRQ_ABS_PTR(lock_srq->srq_backward);
			purge_owner(probing_owner_offset, owner);
			purged = true;
		}
	}

	return purged;
}
#endif /* SUPERSERVER */

static void purge_owner(SRQ_PTR purging_owner_offset, own* owner)
{
/**************************************
 *
 *	p u r g e _ o w n e r
 *
 **************************************
 *
 * Functional description
 *	Purge an owner and all of its associated locks.
 *
 **************************************/
	LOCK_TRACE(("purge_owner (%ld)\n", purging_owner_offset));

	post_history(his_del_owner, purging_owner_offset, SRQ_REL_PTR(owner), 0,
				 false);

	/* Release any locks that are active. */
	SRQ lock_srq;
	while ((lock_srq = SRQ_NEXT(owner->own_requests)) != &owner->own_requests) {
		lrq* request = (lrq*) ((UCHAR *) lock_srq - OFFSET(lrq*, lrq_own_requests));
		release_request(request);
	}

	/* Release any repost requests left dangling on blocking queue. */
	while ((lock_srq = SRQ_NEXT(owner->own_blocks)) != &owner->own_blocks) {
		lrq* request = (lrq*) ((UCHAR *) lock_srq - OFFSET(lrq*, lrq_own_blocks));
		remove_que(&request->lrq_own_blocks);
		request->lrq_type = type_null;
		insert_tail(&LOCK_header->lhb_free_requests,
					&request->lrq_lbl_requests);
	}

	/* Release owner block */
	remove_que(&owner->own_lhb_owners);
	insert_tail(&LOCK_header->lhb_free_owners, &owner->own_lhb_owners);

	owner->own_owner_type = 0;
	owner->own_owner_id = 0;
	owner->own_process_id = 0;
	owner->own_flags = 0;

#ifdef USE_BLOCKING_THREAD
	ISC_event_fini(&owner->own_blocking);
#endif
#ifdef PREVENT_OWNER_STARVATION
	ISC_event_fini(&owner->own_stall);
#endif
	ISC_event_fini(&owner->own_wakeup);
}

static void remove_que(SRQ node)
{
/**************************************
 *
 *	r e m o v e _ q u e
 *
 **************************************
 *
 * Functional description
 *	Remove a node from a self-relative queue.
 *
 *	To handle the event of the process terminating during
 *	the removal of the node, we set shb_remove_node to the
 *	node to be removed. Then, should we be unsuccessful
 *	in the node removal, the next process into the lock table
 *	will notice the uncompleted work and complete it.
 *
 *	Note that the work is completed by again calling this routine,
 *	specifying the node to be removed. As the prior and following
 *	nodes may have changed prior to the crash, we need to redo the
 *	work only based on what is in <node>.
 *
 **************************************/
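/* A worked example of the re-entrancy claim above (illustrative): if a
   first attempt crashed after fixing only the forward neighbour's
   backlink, re-running remove_que(node) still succeeds because each
   CHECK below accepts either the pre-crash or the post-crash value of
   the neighbouring links, and the relinking itself is idempotent. */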
	ASSERT_ACQUIRED;
	shb* recover = (shb*) SRQ_ABS_PTR(LOCK_header->lhb_secondary);
	DEBUG_DELAY;
	recover->shb_remove_node = SRQ_REL_PTR(node);
	DEBUG_DELAY;

	SRQ lock_srq = (SRQ) SRQ_ABS_PTR(node->srq_forward);

	/* The next link might point back to us, or to our prior link should
	 * the backlink change occur before a crash
	 */
	CHECK(lock_srq->srq_backward == SRQ_REL_PTR(node) ||
		  lock_srq->srq_backward == node->srq_backward);
	DEBUG_DELAY;
	lock_srq->srq_backward = node->srq_backward;

	DEBUG_DELAY;
	lock_srq = (SRQ) SRQ_ABS_PTR(node->srq_backward);

	/* The prior link might point forward to us, or to our following link should
	 * the change occur before a crash
	 */
	CHECK(lock_srq->srq_forward == SRQ_REL_PTR(node) ||
		  lock_srq->srq_forward == node->srq_forward);
	DEBUG_DELAY;
	lock_srq->srq_forward = node->srq_forward;

	DEBUG_DELAY;
	recover->shb_remove_node = 0;
	DEBUG_DELAY;

	/* To prevent trying to remove this entry a second time, which could occur,
	 * for instance, when we're removing an owner and crash while removing
	 * the owner's blocking requests, reset the lock_srq entry in this node.
	 * Note that if we get here, shb_remove_node has been cleared, so we
	 * no longer need the queue information.
	 */
	SRQ_INIT((*node));
}

static void release(SRQ_PTR owner_offset)
{
/**************************************
 *
 *	r e l e a s e
 *
 **************************************
 *
 * Functional description
 *	Release the mapped lock file. Advance the event count to wake up
 *	anyone waiting for it, and deliver any blocking notices that
 *	appear to be posted.
 *
 **************************************/
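/* Under PREVENT_OWNER_STARVATION the flow below is, in outline (an
   illustrative summary): rotate the head of lhb_owners to the tail so a
   mutex-starved owner eventually reaches the front, and if we had been
   marked OWN_starved while others stepped aside (OWN_blocking), clear
   both flags and post own_stall so those owners can resume. */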
	if (owner_offset && LOCK_header->lhb_active_owner != owner_offset)
		bug(NULL, "release when not owner");

#ifdef PREVENT_OWNER_STARVATION
	own* owner = NULL;
	if (owner_offset)
		owner = (own*) SRQ_ABS_PTR(owner_offset);

	/* Rotate first owner to tail of active owners' queue
	   in search of mutex-starved owners. */
	own* first_owner = (own*) ((UCHAR *) SRQ_NEXT(LOCK_header->lhb_owners) -
							   OFFSET(own*, own_lhb_owners));
	remove_que(&first_owner->own_lhb_owners);
	insert_tail(&LOCK_header->lhb_owners, &first_owner->own_lhb_owners);

	/* If others stepped aside to let us run then wake them up now. */
	if (owner_offset && owner->own_flags & OWN_starved) {
		owner->own_flags &= ~(OWN_starved | OWN_blocking);

		SRQ lock_srq;
		SRQ_LOOP(LOCK_header->lhb_owners, lock_srq) {
			owner = (own*) ((UCHAR *) lock_srq - OFFSET(own*, own_lhb_owners));
			if (owner->own_flags & OWN_blocking) {
				owner->own_flags &= ~OWN_blocking;
				ISC_event_post(&owner->own_stall);
			}
		}
	}
#endif

#ifdef VALIDATE_LOCK_TABLE
	/* Validate the lock table occasionally (every 500 releases) */
	if ((LOCK_header->lhb_acquires % (HISTORY_BLOCKS / 2)) == 0)
		validate_lhb(LOCK_header);
#endif

	release_mutex();
}

static void release_mutex()
{
/**************************************
 *
 *	r e l e a s e _ m u t e x
 *
 **************************************
 *
 * Functional description
 *	Clear the active owner and release the lock table mutex, so
 *	that other processes blocked on the mutex can proceed.
 *
 **************************************/
	DEBUG_DELAY;

	if (!LOCK_header->lhb_active_owner)
		bug(NULL, "release when not active");

	LOCK_header->lhb_active_owner = 0;

	if (ISC_mutex_unlock(MUTEX))
		bug(NULL, "semop failed (release)");

	DEBUG_DELAY;
}

static void release_request(lrq* request)
{
/**************************************
 *
 *	r e l e a s e _ r e q u e s t
 *
 **************************************
 *
 * Functional description
 *	Release a request. This is called both by release lock
 *	and by the cleanup handler.
 *
 **************************************/
	ASSERT_ACQUIRED;

	/* Start by disconnecting request from both lock and process */
	remove_que(&request->lrq_lbl_requests);
	remove_que(&request->lrq_own_requests);

	request->lrq_type = type_null;
	insert_tail(&LOCK_header->lhb_free_requests, &request->lrq_lbl_requests);
	lbl* lock = (lbl*) SRQ_ABS_PTR(request->lrq_lock);

	/* If the request is marked as blocking, clean it up. */
	if (request->lrq_flags & LRQ_blocking)
	{
		remove_que(&request->lrq_own_blocks);
		request->lrq_flags &= ~LRQ_blocking;
	}

	request->lrq_flags &= ~LRQ_blocking_seen;

	/* Update counts if we are cleaning up something we're waiting on!
	   This should only happen if we are purging out an owner that died */
	if (request->lrq_flags & LRQ_pending) {
		request->lrq_flags &= ~LRQ_pending;
		lock->lbl_pending_lrq_count--;
	}

	/* If there are no outstanding requests, release the lock */
	if (SRQ_EMPTY(lock->lbl_requests))
	{
		CHECK(lock->lbl_pending_lrq_count == 0);
#ifdef VALIDATE_LOCK_TABLE
		if (LOCK_header->lhb_active_owner > 0)
			validate_parent(LOCK_header, request->lrq_lock);
#endif
		remove_que(&lock->lbl_lhb_hash);
		remove_que(&lock->lbl_lhb_data);
		lock->lbl_type = type_null;
		insert_tail(&LOCK_header->lhb_free_locks, &lock->lbl_lhb_hash);
		return;
	}

	/* Re-compute the state of the lock and post any compatible pending
	   requests. */
	if ((request->lrq_state != LCK_none) &&
		!(--lock->lbl_counts[request->lrq_state]))
	{
		lock->lbl_state = lock_state(lock);
		if (request->lrq_state != LCK_null)
		{
			post_pending(lock);
			return;
		}
	}

	/* If a lock enqueue failed because of a deadlock or timeout, the pending
	   request may be holding up compatible, pending lock requests queued
	   behind it.
	   Or, even if this request had been granted, it's now being released,
	   so we might be holding up requests or conversions queued up behind. */
	post_pending(lock);
}

#ifdef USE_BLOCKING_THREAD
static void shutdown_blocking_thread(ISC_STATUS* status_vector)
{
/**************************************
 *
 *	s h u t d o w n _ b l o c k i n g _ t h r e a d
 *
 **************************************
 *
 * Functional description
 *	Perform a controlled shutdown of the blocking thread
 *	to avoid tragic misunderstandings when unmapping
 *	memory.
 *
 **************************************/

	/* Zeroing the owner offset implicitly flags the
	   blocking thread to exit. */
	LOCK_owner_offset = 0;

	/* Poke the blocking action thread to wake up and notice
	   the exit flag. In turn, wait on an event ourselves
	   which the blocking action thread should post on
	   the way out. If it doesn't post, wake up after a
	   timeout and clean up anyway. */
	if (LOCK_owner) {
		// Set a marker for the AST thread to know it's time to clean up
		LOCK_owner->own_flags |= OWN_waiting;

		// wait for the AST thread to start (or 5 secs)
		startupSemaphore->tryEnter(5);

		// Wake up the AST thread - it might be blocking or stalled
		ISC_event_post(&LOCK_owner->own_blocking);
#ifdef PREVENT_OWNER_STARVATION
		ISC_event_post(&LOCK_owner->own_stall);
#endif

		// Tell the scheduler to allow ASTs to run
		AST_ENABLE();

		// Wait for the AST thread to finish cleanup or for 5 seconds
		cleanupSemaphore->tryEnter(5);

		/* Either the AST thread terminated, or our timeout expired */
#if defined HAVE_MMAP || defined WIN_NT
		ISC_unmap_object(status_vector, &LOCK_data, (UCHAR**) &LOCK_owner,
						 sizeof(own));
#endif
	}
}
#endif

static bool signal_owner(own* blocking_owner, SRQ_PTR blocked_owner_offset)
{
/**************************************
 *
 *	s i g n a l _ o w n e r
 *
 **************************************
 *
 * Functional description
 *	Send a signal to a process.
 *
 *	The second parameter is a possible offset to the
 *	blocked owner (or NULL), which is passed on to
 *	blocking_action().
 *
 **************************************/
/*post_history (his_signal, LOCK_header->lhb_iactive_owner, SRQ_REL_PTR (blocking_owner), 0, true);*/

	ASSERT_ACQUIRED;

	/* If a process, other than ourselves, hasn't yet seen a signal
	   that was sent, don't bother to send another one. */
	DEBUG_DELAY;
	if (blocking_owner->own_ast_flags & OWN_signaled)
	{
		DEBUG_MSG(1,
				  ("signal_owner (%ld) - skipped OWN_signaled\n",
				   blocking_owner->own_process_id));
		return true;
	}

	blocking_owner->own_ast_flags |= OWN_signaled;
	DEBUG_DELAY;
	blocking_owner->own_flags &= ~OWN_signal;

#ifndef USE_BLOCKING_THREAD
	if (blocking_owner->own_process_id == LOCK_pid) {
		DEBUG_DELAY;
		blocking_action(SRQ_REL_PTR(blocking_owner), blocked_owner_offset);
		DEBUG_DELAY;
		return true;
	}
#endif

	DEBUG_DELAY;

#ifdef USE_BLOCKING_THREAD
	if (!(errno = ISC_event_post(&blocking_owner->own_blocking)))
		return true;
#endif

	DEBUG_MSG(1, ("signal_owner - direct delivery failed\n"));

	blocking_owner->own_ast_flags &= ~OWN_signaled;
	DEBUG_DELAY;
	blocking_owner->own_flags |= OWN_signal;

	/* Conclude that the owning process is dead */
	return false;
}

#ifdef VALIDATE_LOCK_TABLE

const USHORT EXPECT_inuse = 0;
const USHORT EXPECT_freed = 1;

const USHORT RECURSE_yes = 0;
const USHORT RECURSE_not = 1;

static void validate_history(const SRQ_PTR history_header)
{
/**************************************
 *
 *	v a l i d a t e _ h i s t o r y
 *
 **************************************
 *
 * Functional description
 *	Validate a circular list of history blocks.
 *
 **************************************/
	ULONG count = 0;

	LOCK_TRACE(("validate_history: %ld\n", history_header));

	for (const his* history = (his*) SRQ_ABS_PTR(history_header); true;
		 history = (his*) SRQ_ABS_PTR(history->his_next))
	{
		count++;
		CHECK(history->his_type == type_his);
		// The following condition is always true because UCHAR >= 0
		// CHECK(history->his_operation >= 0);
		CHECK(history->his_operation <= his_MAX);
		if (history->his_next == history_header)
			break;
		CHECK(count <= HISTORY_BLOCKS);
	}
}
#endif

#ifdef VALIDATE_LOCK_TABLE
static void validate_parent(const lhb* alhb, const SRQ_PTR isSomeoneParent)
{
/**************************************
 *
 *	v a l i d a t e _ p a r e n t
 *
 **************************************
 *
 * Functional description
 *	Validate that the lock under release is not someone's parent.
 *
 **************************************/
	if (alhb->lhb_active_owner == 0)
		return;

	const own* owner = (own*) SRQ_ABS_PTR(alhb->lhb_active_owner);

	const srq* lock_srq;
	SRQ_LOOP(owner->own_requests, lock_srq)
	{
		const lrq* request = (lrq*) ((UCHAR *) lock_srq - OFFSET(lrq*, lrq_own_requests));

		if (!(request->lrq_flags & LRQ_repost))
		{
			if (request->lrq_lock != isSomeoneParent)
			{
				const lbl* lock = (lbl*) SRQ_ABS_PTR(request->lrq_lock);

				if (lock->lbl_parent == isSomeoneParent)
				{
					bug_assert("deleting someone's parent", __LINE__);
				}
			}
		}
	}
}
#endif

#ifdef VALIDATE_LOCK_TABLE
static void validate_lhb(const lhb* alhb)
{
/**************************************
 *
 *	v a l i d a t e _ l h b
 *
 **************************************
 *
 * Functional description
 *	Validate the LHB structure and everything that hangs off of it.
 *
 **************************************/
	LOCK_TRACE(("validate_lhb:\n"));

	/* Prevent recursive reporting of validation errors */
	if (LOCK_bugcheck)
		return;

	CHECK(alhb != NULL);
	CHECK(alhb->lhb_type == type_lhb);
	CHECK(alhb->lhb_version == LHB_VERSION);

	validate_shb(alhb->lhb_secondary);
	if (alhb->lhb_active_owner > 0)
		validate_owner(alhb->lhb_active_owner, EXPECT_inuse);

	const srq* lock_srq;
	SRQ_LOOP(alhb->lhb_owners, lock_srq) {
		/* Validate that the next backpointer points back to us */
		const srq* que_next = SRQ_NEXT((*lock_srq));
		CHECK(que_next->srq_backward == SRQ_REL_PTR(lock_srq));

		const own* owner = (own*) ((UCHAR *) lock_srq - OFFSET(own*, own_lhb_owners));
		validate_owner(SRQ_REL_PTR(owner), EXPECT_inuse);
	}

	SRQ_LOOP(alhb->lhb_free_owners, lock_srq) {
		/* Validate that the next backpointer points back to us */
		const srq* que_next = SRQ_NEXT((*lock_srq));
		CHECK(que_next->srq_backward == SRQ_REL_PTR(lock_srq));

		const own* owner = (own*) ((UCHAR *) lock_srq - OFFSET(own*, own_lhb_owners));
		validate_owner(SRQ_REL_PTR(owner), EXPECT_freed);
	}

	SRQ_LOOP(alhb->lhb_free_locks, lock_srq) {
		/* Validate that the next backpointer points back to us */
		const srq* que_next = SRQ_NEXT((*lock_srq));
		CHECK(que_next->srq_backward == SRQ_REL_PTR(lock_srq));

		const lbl* lock = (lbl*) ((UCHAR *) lock_srq - OFFSET(lbl*, lbl_lhb_hash));
		validate_lock(SRQ_REL_PTR(lock), EXPECT_freed, (SRQ_PTR) 0);
	}

	SRQ_LOOP(alhb->lhb_free_requests, lock_srq) {
		/* Validate that the next backpointer points back to us */
		const srq* que_next = SRQ_NEXT((*lock_srq));
		CHECK(que_next->srq_backward == SRQ_REL_PTR(lock_srq));

		const lrq* request = (lrq*) ((UCHAR *) lock_srq - OFFSET(lrq*, lrq_lbl_requests));
		validate_request(SRQ_REL_PTR(request), EXPECT_freed, RECURSE_not);
	}

	CHECK(alhb->lhb_used <= alhb->lhb_length);

	validate_history(alhb->lhb_history);

	DEBUG_MSG(0, ("validate_lhb completed:\n"));
}
#endif
#ifdef VALIDATE_LOCK_TABLE
2007-07-29 04:32:41 +02:00
static void validate_lock(const SRQ_PTR lock_ptr, USHORT freed, const SRQ_PTR lrq_ptr)
2001-05-23 15:26:42 +02:00
{
/**************************************
*
* v a l i d a t e _ l o c k
*
**************************************
*
* Functional description
* Validate the lock structure and everything that hangs off of it.
*
**************************************/
LOCK_TRACE(("validate_lock: %ld\n", lock_ptr));
2008-01-16 08:40:12 +01:00
const lbl* lock = (lbl*) SRQ_ABS_PTR(lock_ptr);
2001-05-23 15:26:42 +02:00
if (freed == EXPECT_freed)
CHECK(lock->lbl_type == type_null)
2007-07-29 04:32:41 +02:00
else
2001-05-23 15:26:42 +02:00
CHECK(lock->lbl_type == type_lbl);
2007-08-02 04:00:37 +02:00
// The following condition is always true because UCHAR >= 0
// CHECK(lock->lbl_state >= LCK_none);
2001-05-23 15:26:42 +02:00
CHECK(lock->lbl_state < LCK_max);
CHECK(lock->lbl_length <= lock->lbl_size);
2007-08-02 04:00:37 +02:00
// The lbl_count's should never roll over to be negative
2007-07-29 04:32:41 +02:00
for (ULONG i = 0; i < FB_NELEM(lock->lbl_counts); i++)
2001-05-23 15:26:42 +02:00
CHECK(!(lock->lbl_counts[i] & 0x8000))
2007-08-02 04:00:37 +02:00
// The count of pending locks should never roll over to be negative
CHECK(!(lock->lbl_pending_lrq_count & 0x8000));
USHORT direct_counts[LCK_max];
memset(direct_counts, 0, sizeof(direct_counts));

ULONG found = 0;
ULONG found_pending = 0;
UCHAR highest_request = LCK_none;
const srq* lock_srq;
SRQ_LOOP(lock->lbl_requests, lock_srq) {
/* Validate that the next backpointer points back to us */
const srq* que_next = SRQ_NEXT((*lock_srq));
CHECK(que_next->srq_backward == SRQ_REL_PTR(lock_srq));

/* Any requests of a freed lock should also be freed */
CHECK(freed == EXPECT_inuse);

const lrq* request = (lrq*) ((UCHAR *) lock_srq - OFFSET(lrq*, lrq_lbl_requests));

/* Note: Don't try to validate_request here, it leads to recursion */

if (SRQ_REL_PTR(request) == lrq_ptr)
found++;
CHECK(found <= 1);	/* check for a loop in the queue */

/* The request must be for this lock */
CHECK(request->lrq_lock == lock_ptr);

if (request->lrq_requested > highest_request)
highest_request = request->lrq_requested;

/* If the request is pending, then it must be incompatible with the current
state of the lock - OR lock ordering is enabled and there is at
least one pending request ahead of it in the queue (before this
request but not including it). */
if (request->lrq_flags & LRQ_pending) {
CHECK(!compatibility[request->lrq_requested][lock->lbl_state] ||
(lockOrdering() && found_pending));

/* The above condition would probably be clearer if we
wrote it as the following:

CHECK (!compatibility[request->lrq_requested][lock->lbl_state] ||
(lockOrdering() && found_pending &&
compatibility[request->lrq_requested][lock->lbl_state]));

but that would be redundant
*/

found_pending++;
}

/* If the request is NOT pending, then it must be rejected or
compatible with the current state of the lock */
if (!(request->lrq_flags & LRQ_pending)) {
CHECK((request->lrq_flags & LRQ_rejected) ||
(request->lrq_requested == lock->lbl_state) ||
compatibility[request->lrq_requested][lock->lbl_state]);
}

direct_counts[request->lrq_state]++;
}

if ((freed == EXPECT_inuse) && (lrq_ptr != 0))
CHECK(found == 1);	/* the request is in the lock's queue */

if (freed == EXPECT_inuse) {
CHECK(found_pending == lock->lbl_pending_lrq_count);

/* The counters in the lock header should match the actual counts;
lock->lbl_counts[LCK_null] isn't maintained, so don't check it */
for (USHORT j = LCK_null; j < LCK_max; j++)
CHECK(direct_counts[j] == lock->lbl_counts[j]);
}

if (lock->lbl_parent && (freed == EXPECT_inuse))
validate_lock(lock->lbl_parent, EXPECT_inuse, (SRQ_PTR) 0);
}
#endif
#ifdef VALIDATE_LOCK_TABLE
static void validate_owner(const SRQ_PTR own_ptr, USHORT freed)
{
/**************************************
*
* v a l i d a t e _ o w n e r
*
**************************************
*
* Functional description
* Validate the owner structure and everything that hangs off of it.
*
**************************************/
LOCK_TRACE(("validate_owner: %ld\n", own_ptr));
2007-07-29 04:32:41 +02:00
const own* owner = (own*) SRQ_ABS_PTR(own_ptr);
2004-12-16 04:03:13 +01:00
//own owner_copy = *owner;
2001-05-23 15:26:42 +02:00
/* Note that owner->own_pending_request can be reset without the lock
* table being acquired - eg: by another process. That being the case,
* we need to stash away a copy of it for validation.
*/
2007-07-29 04:32:41 +02:00
const SRQ_PTR owner_own_pending_request = owner->own_pending_request;
2001-05-23 15:26:42 +02:00
CHECK(owner->own_type == type_own);
if (freed == EXPECT_freed)
CHECK(owner->own_owner_type == 0)
else {
CHECK(owner->own_owner_type > 0);
2008-01-16 08:40:12 +01:00
CHECK(owner->own_owner_type <= 4); /* LCK_OWNER_transaction */
2001-05-23 15:26:42 +02:00
}
CHECK(owner->own_acquire_time <= LOCK_header->lhb_acquires);
/* Check that no invalid flag bit is set */
CHECK(!
2004-12-16 04:03:13 +01:00
(owner->own_flags &
2008-01-16 08:40:12 +01:00
~(OWN_blocking | OWN_scanned | OWN_waiting | OWN_signal
2004-12-16 04:03:13 +01:00
| OWN_wakeup | OWN_starved)));
2001-05-23 15:26:42 +02:00
/* Check that no invalid flag bit is set */
CHECK(!(owner->own_ast_flags & ~(OWN_signaled)));
/* Check that no invalid flag bit is set */
CHECK(!(owner->own_ast_hung_flags & ~(OWN_hung)));
/* Can't both be signal & signaled */
if (owner->own_flags & OWN_signal)
CHECK(!(owner->own_ast_flags & OWN_signaled));
if (owner->own_ast_flags & OWN_signaled)
CHECK(!(owner->own_flags & OWN_signal));
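/* A rough reading of the invariant above (an assumption drawn from how
these flags are used elsewhere in this module, not an executable
example): OWN_signal marks an owner that still needs its blocking AST
delivered, while OWN_signaled marks one whose delivery is already in
flight. A single handshake moves the owner from one state to the
other, so seeing both bits at once would mean that handshake has been
corrupted. */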
const srq* lock_srq;
SRQ_LOOP(owner->own_requests, lock_srq) {
/* Validate that the next backpointer points back to us */
const srq* que_next = SRQ_NEXT((*lock_srq));
CHECK(que_next->srq_backward == SRQ_REL_PTR(lock_srq));

CHECK(freed == EXPECT_inuse);	/* should not be in loop for freed owner */

const lrq* request = (lrq*) ((UCHAR *) lock_srq - OFFSET(lrq*, lrq_own_requests));
validate_request(SRQ_REL_PTR(request), EXPECT_inuse, RECURSE_not);
CHECK(request->lrq_owner == own_ptr);

/* Make sure that a request marked as blocking also exists in the blocking list */
if (request->lrq_flags & LRQ_blocking) {
ULONG found = 0;
const srq* que2;
SRQ_LOOP(owner->own_blocks, que2) {
/* Validate that the next backpointer points back to us */
const srq* que2_next = SRQ_NEXT((*que2));
CHECK(que2_next->srq_backward == SRQ_REL_PTR(que2));

const lrq* request2 = (lrq*) ((UCHAR *) que2 - OFFSET(lrq*, lrq_own_blocks));
CHECK(request2->lrq_owner == own_ptr);

if (SRQ_REL_PTR(request2) == SRQ_REL_PTR(request))
found++;
CHECK(found <= 1);	/* watch for loops in the queue */
}
CHECK(found == 1);	/* a request marked as blocking must be in the blocking queue */
}
}

/* Check each item in the blocking queue */
SRQ_LOOP(owner->own_blocks, lock_srq) {
/* Validate that the next backpointer points back to us */
const srq* que_next = SRQ_NEXT((*lock_srq));
CHECK(que_next->srq_backward == SRQ_REL_PTR(lock_srq));

CHECK(freed == EXPECT_inuse);	/* should not be in loop for freed owner */

const lrq* request = (lrq*) ((UCHAR *) lock_srq - OFFSET(lrq*, lrq_own_blocks));
validate_request(SRQ_REL_PTR(request), EXPECT_inuse, RECURSE_not);

LOCK_TRACE(("Validate own_block: %ld\n", SRQ_REL_PTR(request)));

CHECK(request->lrq_owner == own_ptr);

/* A repost won't be in the request list */
if (request->lrq_flags & LRQ_repost)
continue;

/* Make sure that each block also exists in the request list */
ULONG found = 0;
const srq* que2;
SRQ_LOOP(owner->own_requests, que2) {
/* Validate that the next backpointer points back to us */
const srq* que2_next = SRQ_NEXT((*que2));
CHECK(que2_next->srq_backward == SRQ_REL_PTR(que2));

const lrq* request2 = (lrq*) ((UCHAR *) que2 - OFFSET(lrq*, lrq_own_requests));
CHECK(request2->lrq_owner == own_ptr);

if (SRQ_REL_PTR(request2) == SRQ_REL_PTR(request))
found++;
CHECK(found <= 1);	/* watch for loops in the queue */
}
CHECK(found == 1);	/* a blocking request must be in the request queue */
}

/* If there is a pending request, make sure it is valid and that it
* exists in the queue for the lock.
*/
if (owner_own_pending_request && (freed == EXPECT_inuse)) {
/* Make sure the pending request is valid, and that we own it */
const lrq* request3 = (lrq*) SRQ_ABS_PTR(owner_own_pending_request);
validate_request(SRQ_REL_PTR(request3), EXPECT_inuse, RECURSE_not);
CHECK(request3->lrq_owner == own_ptr);

/* Make sure the lock the request is for is valid */
const lbl* lock = (lbl*) SRQ_ABS_PTR(request3->lrq_lock);
validate_lock(SRQ_REL_PTR(lock), EXPECT_inuse, (SRQ_PTR) 0);

/* Make sure the pending request is on the list of requests for the lock */
bool found_pending = false;
const srq* que_of_lbl_requests;
SRQ_LOOP(lock->lbl_requests, que_of_lbl_requests) {
const lrq* pending = (lrq*) ((UCHAR *) que_of_lbl_requests -
OFFSET(lrq*, lrq_lbl_requests));
if (SRQ_REL_PTR(pending) == owner_own_pending_request) {
found_pending = true;
break;
}
}

/* The pending request must exist in the lock's request queue */
CHECK(found_pending);

/* Either the pending request is the same as what we stashed away, or it has
been cleared by another process without the lock table acquired. */
CHECK((owner_own_pending_request == owner->own_pending_request) ||
!owner->own_pending_request);
}
}
}
#endif
#ifdef VALIDATE_LOCK_TABLE
static void validate_request(const SRQ_PTR lrq_ptr, USHORT freed, USHORT recurse)
{
/**************************************
*
* v a l i d a t e _ r e q u e s t
*
**************************************
*
* Functional description
* Validate the request structure and everything that hangs off of it.
*
**************************************/
LOCK_TRACE(("validate_request: %ld\n", lrq_ptr));
2008-01-16 08:40:12 +01:00
const lrq* request = (lrq*) SRQ_ABS_PTR(lrq_ptr);
2001-05-23 15:26:42 +02:00
if (freed == EXPECT_freed)
CHECK(request->lrq_type == type_null)
2007-07-29 04:32:41 +02:00
else
2001-05-23 15:26:42 +02:00
CHECK(request->lrq_type == type_lrq);
/* Check that no invalid flag bit is set */
CHECK(!
2004-12-16 04:03:13 +01:00
(request->lrq_flags &
~(LRQ_blocking | LRQ_pending | LRQ_converting |
LRQ_rejected | LRQ_timed_out | LRQ_deadlock |
LRQ_repost | LRQ_scanned | LRQ_blocking_seen)));
2001-05-23 15:26:42 +02:00
/* LRQ_converting & LRQ_timed_out are defined, but never actually used */
CHECK(!(request->lrq_flags & (LRQ_converting | LRQ_timed_out)));
/* Once a request is rejected, it CAN'T be pending any longer */
if (request->lrq_flags & LRQ_rejected)
CHECK(!(request->lrq_flags & LRQ_pending));
/* Can't both be scanned & marked for deadlock walk */
CHECK((request->lrq_flags & (LRQ_deadlock | LRQ_scanned)) !=
(LRQ_deadlock | LRQ_scanned));
CHECK(request->lrq_requested < LCK_max);
CHECK(request->lrq_state < LCK_max);
2007-07-29 04:32:41 +02:00
if (freed == EXPECT_inuse)
{
2001-05-23 15:26:42 +02:00
if (recurse == RECURSE_yes)
validate_owner(request->lrq_owner, EXPECT_inuse);
/* Reposted request are pseudo requests, not attached to any real lock */
if (!(request->lrq_flags & LRQ_repost))
validate_lock(request->lrq_lock, EXPECT_inuse, SRQ_REL_PTR(request));
2001-05-23 15:26:42 +02:00
}
}
#endif
#ifdef VALIDATE_LOCK_TABLE
static void validate_shb(const SRQ_PTR shb_ptr)
{
/**************************************
*
* v a l i d a t e _ s h b
*
**************************************
*
* Functional description
* Validate the SHB structure and everything that hangs off of
* it.
* Of course, it would have been a VERY good thing if someone
* had moved this into lhb when we made a unique v4 lock
* manager....
* 1995-April-13 David Schnepper
*
**************************************/
LOCK_TRACE(("validate_shb: %ld\n", shb_ptr));
2008-01-16 08:40:12 +01:00
const shb* secondary_header = (shb*) SRQ_ABS_PTR(shb_ptr);
2001-05-23 15:26:42 +02:00
2003-09-18 12:24:03 +02:00
CHECK(secondary_header->shb_type == type_shb);
2001-05-23 15:26:42 +02:00
2003-09-18 12:24:03 +02:00
validate_history(secondary_header->shb_history);
2001-05-23 15:26:42 +02:00
}
#endif
static USHORT wait_for_request(lrq* request,
SSHORT lck_wait,
ISC_STATUS* status_vector)
{
/**************************************
*
* w a i t _ f o r _ r e q u e s t
*
**************************************
*
* Functional description
* There is a request that needs satisfaction, but is waiting for
* somebody else. Mark the request as pending and go to sleep until
* the lock gets poked. When we wake up, see if somebody else has
* cleared the pending flag. If not, go back to sleep.
* Returns
 *	Returns
 *	FB_SUCCESS - we waited until the request was granted or rejected
 *	FB_FAILURE - insufficient resources to wait (eg: no semaphores)
 *
 **************************************/
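/* Rough shape of the protocol implemented below (an explanatory sketch
only, not code):

mark the request LRQ_pending; bump lbl_pending_lrq_count;
post_blockage(); release the lock table;
loop {
if (!LRQ_pending) break;            // somebody granted/rejected us
sleep on own_wakeup until min(deadlock_timeout, lock_timeout);
if (!LRQ_pending) break;
acquire();                          // revalidate under the lock table
if (lock timeout expired) { mark LRQ_rejected; break; }
run the deadlock scan or repost the blockage; release();
}
clear own_pending_request and OWN_waiting;

The details (event clearing, owner re-fetch after potential remaps)
follow below. */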
ASSERT_ACQUIRED;
++LOCK_header->lhb_waits;
const SLONG scan_interval = LOCK_header->lhb_scan_interval;

/* lrq_count will be off if we wait for a pending request */
CHECK(!(request->lrq_flags & LRQ_pending));

request->lrq_flags &= ~LRQ_rejected;
request->lrq_flags |= LRQ_pending;
SRQ_PTR owner_offset = request->lrq_owner;
SRQ_PTR lock_offset = request->lrq_lock;
lbl* lock = (lbl*) SRQ_ABS_PTR(lock_offset);
lock->lbl_pending_lrq_count++;

if (lockOrdering()) {
if (!request->lrq_state) {
/* If ordering is in effect, and this is a conversion of
an existing lock in LCK_none state - put the lock to the
end of the list so it's not taking cuts in the lineup */
remove_que(&request->lrq_lbl_requests);
insert_tail(&lock->lbl_requests, &request->lrq_lbl_requests);
}
}
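/* Illustrative example of the re-queue above (explanatory only): with
lock ordering on, a queue [A(granted), B(pending), C(pending)] must be
served FIFO. A fresh wait from a request still in LCK_none state is
moved to the tail, giving [A, B, C, us], so it cannot jump ahead of B
and C, which were already waiting. A conversion from a real held state
keeps its existing position in the queue. */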
own* owner = (own*) SRQ_ABS_PTR(owner_offset);
SRQ_PTR request_offset = SRQ_REL_PTR(request);
owner->own_pending_request = request_offset;
owner->own_flags &= ~(OWN_scanned | OWN_wakeup);
owner->own_flags |= OWN_waiting;

/* Post blockage. If the blocking owner has disappeared, the blockage
may clear spontaneously. */

post_blockage(request, lock, false);
post_history(his_wait, owner_offset, lock_offset, SRQ_REL_PTR(request), true);

release(owner_offset);

time_t current_time = time(NULL);

/* If a lock timeout was requested (wait < 0) then figure
out the time when the lock request will time out */

const time_t lock_timeout = (lck_wait < 0) ? current_time + (-lck_wait) : 0;
time_t deadlock_timeout = current_time + scan_interval;
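/* Worked example of the timeout arithmetic (illustrative values only):
with time(NULL) == 1000, lck_wait == -5 and lhb_scan_interval == 10,
lock_timeout becomes 1005 and deadlock_timeout becomes 1010. Each pass
of the loop below sleeps only until the nearer deadline (1005 here)
and then decides whether to reject, rescan, or keep waiting. With
lck_wait > 0 (wait forever), lock_timeout stays 0 and only the
periodic deadlock scan bounds each sleep. */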
/* Wait in a loop until the lock becomes available */

#ifdef DEV_BUILD
ULONG repost_counter = 0;
#endif

int ret;
while (true) {
/* NOTE: Many operations in this loop are done without having
* the lock table acquired - for performance reasons
*/

/* Before starting to wait - look to see if someone resolved
the request for us - if so we're out easy! */

request = (lrq*) SRQ_ABS_PTR(request_offset);
if (!(request->lrq_flags & LRQ_pending)) {
break;
}

/* Recalculate when we next want to wake up: the lesser of the
deadlock scan interval and the lock request's own timeout */

time_t timeout = deadlock_timeout;
if (lck_wait < 0 && lock_timeout < deadlock_timeout)
timeout = lock_timeout;

/* Prepare to wait for a timeout or a wakeup from somebody else. */

owner = (own*) SRQ_ABS_PTR(owner_offset);

#if !defined SUPERSERVER && (defined HAVE_MMAP || defined WIN_NT)
if (!(LOCK_owner->own_flags & OWN_wakeup))
#else
if (!(owner->own_flags & OWN_wakeup))
#endif
{
/* Re-initialize the value each time through the loop to make sure that
the semaphore looks 'un-poked'. */

/* YYY: NOTE - couldn't there be "missing events" here? */
/* We don't own the lock table at this point, but we modify it! */

SLONG value;
event_t* event_ptr;

#if !defined SUPERSERVER && (defined HAVE_MMAP || defined WIN_NT)
event_ptr = &LOCK_owner->own_wakeup;
value = ISC_event_clear(event_ptr);
event_ptr = &LOCK_owner->own_wakeup;
#else
event_ptr = &owner->own_wakeup;
value = ISC_event_clear(event_ptr);
owner = (own*) SRQ_ABS_PTR(owner_offset);
event_ptr = &owner->own_wakeup;
#endif

#if !defined SUPERSERVER && (defined HAVE_MMAP || defined WIN_NT)
if (!(LOCK_owner->own_flags & OWN_wakeup))
#else
if (!(owner->own_flags & OWN_wakeup))
#endif
{
/* Until here we've been the only thread in the engine (we no longer
release the engine in LCK.C to avoid problems with more than
one thread running in our not-yet-MT-safe engine). We already
tried to execute the AST routine provided (sometimes) by the blocking
owner, hoping our lock request would be satisfied. It did not help!
The only thing we can do now is wait. But let's do it without
monopolizing the engine.
*/
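/* Note on the bracketing below (descriptive only): under SUPERSERVER,
THREAD_EXIT/THREAD_ENTER surrender the engine's single-thread monopoly
for the duration of the sleep, and AST_ENABLE/AST_DISABLE open a
window in which blocking ASTs may be delivered to us while we are
parked inside ISC_event_wait(). */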
#ifdef SUPERSERVER
THREAD_EXIT();
#endif
AST_ENABLE();
ret = ISC_event_wait(1, &event_ptr, &value,
(timeout - current_time) * 1000000);
AST_DISABLE();
#ifdef SUPERSERVER
THREAD_ENTER();
#endif
}
}
/* We've woken up from the wait - now look around and see
why we woke up */
/* If somebody else has resolved the lock, we're done */

request = (lrq*) SRQ_ABS_PTR(request_offset);
if (!(request->lrq_flags & LRQ_pending)) {
break;
}
/* See if we woke up because another owner deliberately woke us up:
ret==FB_SUCCESS --> we were deliberately woken up
ret==FB_FAILURE --> we still don't know why we woke up */

/* Only if we came out of the ISC_event_wait() because of a post_wakeup()
by another owner is OWN_wakeup set. This is the only FB_SUCCESS case. */

#if !defined SUPERSERVER && (defined HAVE_MMAP || defined WIN_NT)
if (LOCK_owner->own_flags & OWN_wakeup)
#else
owner = (own*) SRQ_ABS_PTR(owner_offset);
if (owner->own_flags & OWN_wakeup)
#endif
ret = FB_SUCCESS;
else
ret = FB_FAILURE;
current_time = time(NULL);

/* See if we woke up for a bogus reason - if so,
go right back to sleep. The wakeup is bogus unless:
- we were deliberately woken up;
- it's time for a deadlock scan;
- it's time for the lock timeout to expire.
Bogus reasons for wakeups include signal reception on some
platforms (eg: SUN4).
Note: we allow a 1 second leeway on declaring a bogus
wakeup due to timing differences (we use seconds here,
ISC_event_wait() uses finer granularity) */
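/* Concrete illustration of the leeway test below (explanatory values
only): with timeout == 1010, a spurious signal landing at
current_time == 1008 satisfies 1008 + 1 < 1010, so we simply loop and
sleep again; at 1009 or later the wakeup is treated as a real timer
expiry, even though it may be up to a second early. */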
if ((ret != FB_SUCCESS) && (current_time + 1 < timeout)) {
continue;
}
/* We apparently woke up for some real reason.
Make sure everyone is still with us. Then see if we're still
blocked. */

acquire(owner_offset);
request = (lrq*) SRQ_ABS_PTR(request_offset);	/* Re-init after potential remap */
lock = (lbl*) SRQ_ABS_PTR(lock_offset);
owner = (own*) SRQ_ABS_PTR(owner_offset);
owner->own_flags &= ~OWN_wakeup;

/* Now that we own the lock table, see if the request was resolved
while we were waiting for the lock table */

if (!(request->lrq_flags & LRQ_pending)) {
release(owner_offset);
break;
}

current_time = time(NULL);	/* refetch due to wait in acquire() */
2001-05-23 15:26:42 +02:00
/* See if we've waited beyond the lock timeout - if so we
mark our own request as rejected */
if (lck_wait < 0 && lock_timeout <= current_time)
{
/* We're going to reject our lock - it's the callers responsibility
to do cleanup and make sure post_pending() is called to wakeup
other owners we might be blocking */
request->lrq_flags |= LRQ_rejected;
request->lrq_flags &= ~LRQ_pending;
lock->lbl_pending_lrq_count--;
release(owner_offset);
break;
}
/* We're going to do some real work - reset when we next want to
do a deadlock scan */
deadlock_timeout = current_time + scan_interval;
2008-01-16 08:40:12 +01:00
lrq* blocking_request;
2001-05-23 15:26:42 +02:00
/* Handle the lock event first */
if (ret == FB_SUCCESS)
{
/* Someone posted our wakeup event, but DIDN'T grant our request.
Re-post what we're blocking on and continue to wait.
This could happen if the lock was granted to a different request;
we have to tell the new owner of the lock that they are blocking us. */

post_blockage(request, lock, false);
release(owner_offset);
continue;
}
#ifndef SUPERSERVER
/* See if all the other owners are still alive. Dead ones will be purged,
and purging one might resolve our lock request. */

/* Do not rescan the owners if we received notification that
blocking ASTs have completed - we will do it next time if needed */

else if (probe_owners(owner_offset) &&
!(request->lrq_flags & LRQ_pending))
{
release(owner_offset);
break;
}
#endif /* SUPERSERVER */
/* If we've not previously been scanned for a deadlock, go do a
deadlock scan */

else if (!(owner->own_flags & OWN_scanned) &&
(blocking_request = deadlock_scan(owner, request)))
{
/* Something has been selected for rejection to prevent a
deadlock. Clean things up and go on. We still have to
wait for our request to be resolved. */

DEBUG_MSG(0, ("wait_for_request: selecting something for deadlock kill\n"));

++LOCK_header->lhb_deadlocks;
blocking_request->lrq_flags |= LRQ_rejected;
blocking_request->lrq_flags &= ~LRQ_pending;
lbl* blocking_lock = (lbl*) SRQ_ABS_PTR(blocking_request->lrq_lock);
blocking_lock->lbl_pending_lrq_count--;
own* blocking_owner = (own*) SRQ_ABS_PTR(blocking_request->lrq_owner);
blocking_owner->own_pending_request = 0;
blocking_owner->own_flags &= ~OWN_scanned;
if (blocking_request != request)
post_wakeup(blocking_owner);
/* else
We rejected our own request to avoid a deadlock.
When we get back to the top of the master loop we
fall out and start cleaning up */
}
else
{
/* Our request is not resolved, all the owners are alive, and there's
no deadlock - there's nothing else to do. Let's
make sure our request hasn't been forgotten by reminding
all the owners that we're waiting - some platforms under the CLASSIC
architecture had problems with "missing signals" - which is
another reason to repost the blockage.
Also, the ownership of the lock could have changed, and we
weren't woken up because we weren't next in line for the lock.
We need to inform the new owner. */

DEBUG_MSG(0, ("wait_for_request: forcing a resignal of blockers\n"));

post_blockage(request, lock, false);

#ifdef DEV_BUILD
repost_counter++;
if (repost_counter % 50 == 0) {
gds__log("wait_for_request: owner %d reposted %ld times for lock %d",
owner_offset,
repost_counter,
lock_offset);
DEBUG_MSG(0,
("wait_for_request: reposted %ld times for this lock!\n",
repost_counter));
}
#endif /* DEV_BUILD */
}
release(owner_offset);
}
/* NOTE: the lock table is not acquired at this point */

#ifdef DEV_BUILD
request = (lrq*) SRQ_ABS_PTR(request_offset);
CHECK(!(request->lrq_flags & LRQ_pending));
#endif /* DEV_BUILD */

owner = (own*) SRQ_ABS_PTR(owner_offset);
owner->own_pending_request = 0;
owner->own_flags &= ~OWN_waiting;

return FB_SUCCESS;
}