/*
* PROGRAM: JRD access method
* MODULE: cch.h
* DESCRIPTION: Cache manager definitions
*
* The contents of this file are subject to the Interbase Public
* License Version 1.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy
* of the License at http://www.Inprise.com/IPL.html
*
* Software distributed under the License is distributed on an
* "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either express
* or implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The Original Code was created by Inprise Corporation
* and its predecessors. Portions created by Inprise Corporation are
* Copyright (C) Inprise Corporation.
*
* All Rights Reserved.
* Contributor(s): ______________________________________.
*/
#ifndef JRD_CCH_H
#define JRD_CCH_H
#include "../include/fb_blk.h"
#include "../jrd/os/pio.h"
#include "../common/classes/semaphore.h"
#ifdef SUPERSERVER_V2
#include "../jrd/sbm.h"
#include "../jrd/pag.h"
#endif
#include "../jrd/que.h"
#include "../jrd/lls.h"
#include "../jrd/pag.h"
#include "../jrd/isc.h"
//#define CCH_DEBUG
#ifdef CCH_DEBUG
DEFINE_TRACE_ROUTINE(cch_trace);
#define CCH_TRACE(args) cch_trace args
#define CCH_TRACE_AST(message) gds__trace(message)
#else
#define CCH_TRACE(args) /* nothing */
#define CCH_TRACE_AST(message) /* nothing */
#endif
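// Illustrative note (not part of the original source): because CCH_TRACE expands
// its argument list verbatim, call sites wrap the whole printf-style argument list
// in an extra pair of parentheses. A hypothetical call
//   CCH_TRACE(("page %ld fetched", (long) page_num));
// expands to cch_trace("page %ld fetched", (long) page_num) when CCH_DEBUG is
// defined, and to nothing otherwise.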
struct exp_index_buf;
namespace Ods {
struct pag;
}
namespace Jrd {
class Lock;
class Precedence;
class thread_db;
struct que;
class BufferDesc;
class Database;
/* Page buffer cache size constraints. */
const ULONG MIN_PAGE_BUFFERS = 50;
const ULONG MAX_PAGE_BUFFERS = 131072;
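// A minimal sketch of how these bounds are typically applied to a configured
// buffer count (illustrative only; the actual validation lives in the cache
// implementation, not in this header):
//   if (buffers < MIN_PAGE_BUFFERS) buffers = MIN_PAGE_BUFFERS;
//   if (buffers > MAX_PAGE_BUFFERS) buffers = MAX_PAGE_BUFFERS;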
#define DIRTY_LIST
//#define DIRTY_TREE
#ifdef DIRTY_TREE
/* AVL-balanced tree node */
struct BalancedTreeNode
{
BufferDesc* bdb_node;
SSHORT comp;
};
#endif // DIRTY_TREE
/* BufferControl -- Buffer control block -- one per system */
struct bcb_repeat
{
BufferDesc* bcb_bdb; /* Buffer descriptor block */
que bcb_page_mod; /* Que of buffers with page mod n */
};
class BufferControl : public pool_alloc_rpt<bcb_repeat, type_bcb>
{
public:
explicit BufferControl(MemoryPool& p) : bcb_memory(p) { }
UCharStack bcb_memory; /* Large block partitioned into buffers */
que bcb_in_use; /* Que of buffers in use */
que bcb_empty; /* Que of empty buffers */
#ifdef DIRTY_TREE
BufferDesc* bcb_btree; /* root of dirty page btree */
#endif
#ifdef DIRTY_LIST
que bcb_dirty; // que of dirty buffers
SLONG bcb_dirty_count; // count of dirty pages in the list
#endif
Precedence* bcb_free; /* Free precedence blocks */
que bcb_free_lwt; /* Free latch wait blocks */
que bcb_free_slt; // Free shared latch blocks
SSHORT bcb_flags; /* see below */
SSHORT bcb_free_minimum; /* Threshold to activate cache writer */
ULONG bcb_count; /* Number of buffers allocated */
ULONG bcb_checkpoint; /* Count of buffers to checkpoint */
ULONG bcb_writeable_mark; // mark value used in precedence graph walk
#ifdef SUPERSERVER_V2
PageBitmap* bcb_prefetch; /* Bitmap of pages to prefetch */
#endif
bcb_repeat bcb_rpt[1];
};
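// Descriptive note (inferred from the declarations above, not stated in the
// original source): since BufferControl derives from
// pool_alloc_rpt<bcb_repeat, type_bcb>, bcb_rpt[1] is a trailing repeating
// array -- the block is allocated with room for bcb_count bcb_repeat slots.
// Each slot's bcb_page_mod que presumably chains the buffers whose page number
// is congruent to that slot index (the "page mod n" in the field comment).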
const int BCB_keep_pages = 1; /* set during btc_flush(), pages not removed from dirty binary tree */
const int BCB_cache_writer = 2; /* cache writer thread has been started */
//const int BCB_checkpoint_db = 4; // WAL has requested a database checkpoint
const int BCB_writer_start = 4; // cache writer thread is starting now
const int BCB_writer_active = 8; /* no need to post writer event count */
#ifdef SUPERSERVER_V2
const int BCB_cache_reader = 16; /* cache reader thread has been started */
const int BCB_reader_active = 32; /* cache reader not blocked on event */
#endif
const int BCB_free_pending = 64; /* request cache writer to free pages */
/* BufferDesc -- Buffer descriptor block */
const int BDB_max_shared = 20; /* maximum number of shared latch owners per BufferDesc */
class BufferDesc : public pool_alloc<type_bdb>
{
public:
BufferDesc() : bdb_page(0, 0) {}
Database* bdb_dbb; /* Database block (for ASTs) */
Lock* bdb_lock; /* Lock block for buffer */
que bdb_que; /* Buffer que */
que bdb_in_use; /* queue of buffers in use */
#ifdef DIRTY_LIST
que bdb_dirty; // dirty pages LRU queue
#endif
Ods::pag* bdb_buffer; /* Actual buffer */
exp_index_buf* bdb_expanded_buffer; /* expanded index buffer */
PageNumber bdb_page; /* Database page number in buffer */
SLONG bdb_incarnation;
ULONG bdb_transactions; /* vector of dirty flags to reduce commit overhead */
SLONG bdb_mark_transaction; /* hi-water mark transaction to defer header page I/O */
#ifdef DIRTY_TREE
BufferDesc* bdb_left; /* dirty page binary tree link */
BufferDesc* bdb_right; /* dirty page binary tree link */
BufferDesc* bdb_parent; /* dirty page binary tree link */
SSHORT bdb_balance; /* AVL-tree balance (-1, 0, 1) */
#endif
que bdb_lower; /* lower precedence que */
que bdb_higher; /* higher precedence que */
que bdb_waiters; /* latch wait que */
thread_db* bdb_exclusive; /* thread holding exclusive latch */
thread_db* bdb_io; /* thread holding io latch */
UATOM bdb_ast_flags; /* flags manipulated at AST level */
USHORT bdb_flags;
SSHORT bdb_use_count; /* Number of active users */
SSHORT bdb_scan_count; /* concurrent sequential scans */
ULONG bdb_difference_page; // Page number in the difference file (NBAK)
SLONG bdb_backup_lock_owner; // Logical owner of database_lock for buffer
ULONG bdb_writeable_mark; // mark value used in precedence graph walk
que bdb_shared; // shared latches queue
};
/* bdb_flags */
// to clear BDB_dirty use clear_page_dirty_flag()
const int BDB_dirty = 1; /* page has been updated but not written yet */
const int BDB_garbage_collect = 2; /* left by scan for garbage collector */
const int BDB_writer = 4; /* someone is updating the page */
const int BDB_marked = 8; /* page has been updated */
const int BDB_must_write = 16; /* forces a write as soon as the page is released */
const int BDB_faked = 32; /* page was just allocated */
//const int BDB_journal = 64; // Journal buffer
const int BDB_system_dirty = 128; /* system transaction has marked dirty */
const int BDB_io_error = 256; /* page i/o error */
const int BDB_read_pending = 512; /* read is pending */
const int BDB_free_pending = 1024; /* buffer being freed for reuse */
const int BDB_not_valid = 2048; /* i/o error invalidated buffer */
const int BDB_db_dirty = 4096; /* page must be written to database */
const int BDB_checkpoint = 8192; /* page must be written by next checkpoint */
const int BDB_prefetch = 16384; /* page has been prefetched but not yet referenced */
const int BDB_no_blocking_ast = 32768; /* No blocking AST registered with page lock */
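// The values above are single bits, so bdb_flags is combined and tested with
// the usual bitwise idioms. A hypothetical "needs to be written" check might read
//   if (bdb->bdb_flags & (BDB_dirty | BDB_db_dirty | BDB_must_write)) { ... }
// Note that BDB_dirty itself is only cleared via clear_page_dirty_flag(), as
// the comment above the flag list states.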
/* bdb_ast_flags */
const int BDB_blocking = 1; /* a blocking ast was sent while page locked */
/* PRE -- Precedence block */
class Precedence : public pool_alloc<type_pre>
{
public:
BufferDesc* pre_hi;
BufferDesc* pre_low;
que pre_lower;
que pre_higher;
SSHORT pre_flags;
};
const int PRE_cleared = 1;
/* Compatibility matrix for latch types.

An exclusive latch is needed to modify a page. Before
marking a page, an 'io-prevention' latch is needed: a mark latch.
To look at a buffer, a shared latch is needed. To write a page,
an io latch is needed.

Exclusive and shared latches interact. Io and mark latches
interact. A mark latch is implemented as an io latch.

Latches are granted in the order in which they are
queued, with one notable exception -- if a buffer write
is in progress, then shared latches are granted ahead
of any pending exclusive latches.

In the matrix below, 1 means the two latch types are compatible,
0 means they conflict, and '-' means the pair does not interact.

            shared   io   exclusive   mark
-------------------------------------------------
shared        1      -        0        -
io            -      0        -        0
exclusive     0      -        0        -
mark          -      0        -        0 */
/* LATCH types */
enum LATCH
{
LATCH_none,
LATCH_shared,
LATCH_io,
LATCH_exclusive,
LATCH_mark
};
/* LWT -- Latch wait block */
class LatchWait : public pool_alloc<type_lwt>
{
public:
thread_db* lwt_tdbb;
LATCH lwt_latch; /* latch type requested */
que lwt_waiters;/* latch queue */
Firebird::Semaphore lwt_sem; /* grant event to wait on */
USHORT lwt_flags;
};
const int LWT_pending = 1; /* latch request is pending */
// Shared Latch
class SharedLatch
{
public:
thread_db* slt_tdbb; // thread holding latch
BufferDesc* slt_bdb; // buffer this latch is held on
que slt_tdbb_que; // thread's latches queue
que slt_bdb_que; // buffer's latches queue
};
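// Descriptive note (inferred from the field comments above): each SharedLatch
// is linked into two queues at once -- the owning thread's list of latches via
// slt_tdbb_que and the buffer's list of shared holders via slt_bdb_que -- and
// BDB_max_shared above bounds how many such holders a single BufferDesc may have.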
#ifdef SUPERSERVER_V2
#include "../jrd/os/pio.h"
// Constants used by prefetch mechanism
const int PREFETCH_MAX_TRANSFER = 16384; // maximum block I/O transfer (bytes)
// maximum pages allowed per prefetch request
const int PREFETCH_MAX_PAGES = (2 * PREFETCH_MAX_TRANSFER / MIN_PAGE_SIZE);
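// Worked example (assuming MIN_PAGE_SIZE is the 1024-byte minimum ODS page
// size defined elsewhere): 2 * 16384 / 1024 = 32 pages per prefetch request,
// i.e. up to two maximum-sized I/O transfers' worth of pages.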
// Prefetch block
class Prefetch : public pool_alloc<type_prf>
{
public:
thread_db* prf_tdbb; /* thread database context */
SLONG prf_start_page; /* starting page of multipage prefetch */
USHORT prf_max_prefetch; /* maximum no. of pages to prefetch */
USHORT prf_page_count; /* actual no. of pages being prefetched */
phys_io_blk prf_piob; /* physical I/O status block */
SCHAR* prf_aligned_buffer; /* buffer address aligned for raw (OS cache bypass) I/O */
SCHAR* prf_io_buffer; /* I/O buffer address */
UCHAR prf_flags;
BufferDesc* prf_bdbs[PREFETCH_MAX_TRANSFER / MIN_PAGE_SIZE];
SCHAR prf_unaligned_buffer[PREFETCH_MAX_TRANSFER + MIN_PAGE_SIZE];
};
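// Descriptive note (inferred from the field sizes above): prf_unaligned_buffer
// reserves MIN_PAGE_SIZE extra bytes so that prf_aligned_buffer can point at a
// page-aligned address inside it, which raw I/O that bypasses the OS file cache
// typically requires; prf_io_buffer holds the address actually handed to the
// I/O layer for a given transfer.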
const int PRF_active = 1; /* prefetch block currently in use */
#endif // SUPERSERVER_V2
} //namespace Jrd
#endif // JRD_CCH_H