Mirror of https://github.com/FirebirdSQL/firebird.git, synced 2025-01-24 04:43:03 +01:00
Cleanup the dead code remaining from the legacy scrollable cursors feature.
This commit is contained in:
parent f0935fb759
commit e5a732d782
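For orientation before the hunks below, here is a minimal, self-contained C++ sketch of the machinery this commit removes: the expanded index buffer (btree_exp / exp_index_buf) that the legacy scrollable-cursor code used to walk a btree page backwards. It is reconstructed from the removed lines in this diff, not part of the commit; the typedefs, the simplified IndexNode stand-in, and the name nextNodeSketch are assumptions for illustration, and the real readNode() call is elided.

// Sketch only: simplified stand-ins for the real Ods/Jrd types.
typedef unsigned char UCHAR;
typedef unsigned short USHORT;

// Stand-in for the on-page, prefix-compressed index node (real type: Ods::IndexNode).
struct IndexNode
{
	USHORT prefix;   // bytes shared with the previous key
	USHORT length;   // bytes stored in this node
};

// Expanded (prefix-decompressed) copy of a node, kept alongside the btree page
// so the old scrollable-cursor code could navigate backwards.
struct btree_exp
{
	UCHAR btx_btr_previous_length;	// length of data for previous node on btree page
	UCHAR btx_data[1];				// expanded data element (variable length)
};

// The removed BTreeNode::nextNode() advanced two cursors in lock step: the raw
// page pointer (parsed by readNode(), elided here) and the matching slot in the
// expanded buffer, whose entries are a small header plus the fully expanded key.
UCHAR* nextNodeSketch(IndexNode* node, UCHAR* pointer, btree_exp** expanded_node)
{
	// pointer = readNode(node, pointer, true);  // real code parses the node here
	if (*expanded_node)
	{
		*expanded_node = (btree_exp*) ((UCHAR*) (*expanded_node)->btx_data +
			node->prefix + node->length);
	}
	return pointer;
}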
@@ -203,7 +203,7 @@ USHORT getNodeSize(const IndexNode* indexNode, bool leafNode)
}
else if (indexNode->length == 0)
{
if (indexNode->prefix == 0) {
if (indexNode->prefix == 0) {
internalFlags = BTN_ZERO_PREFIX_ZERO_LENGTH_FLAG;
}
else {
@@ -366,32 +366,6 @@ bool keyEquality(USHORT length, const UCHAR* data, const IndexNode* indexNode)
}


UCHAR* nextNode(IndexNode* node, UCHAR* pointer, btree_exp** expanded_node)
{
/**************************************
*
* n e x t N o d e
*
**************************************
*
* Functional description
* Find the next node on both the index page
* and its associated expanded buffer.
*
**************************************/

pointer = readNode(node, pointer, true);

if (*expanded_node)
{
*expanded_node = (btree_exp*) ((UCHAR*) (*expanded_node)->btx_data +
node->prefix + node->length);
}

return pointer;
}


UCHAR* readJumpInfo(IndexJumpInfo* jumpInfo, UCHAR* pagePointer)
{
/**************************************
@@ -34,16 +34,6 @@
#include "../jrd/ods.h"
#include "../common/classes/array.h"

// format of expanded index node, used for backwards navigation
struct btree_exp
{
UCHAR btx_previous_length; // AB: total size for previous node --length of data for previous node
UCHAR btx_btr_previous_length; // length of data for previous node on btree page
UCHAR btx_data[1]; // expanded data element
};

const int BTX_SIZE = 2;

// Flags (3-bits) used for index node
const int BTN_NORMAL_FLAG = 0;
const int BTN_END_LEVEL_FLAG = 1;
@@ -54,16 +44,6 @@ const int BTN_ONE_LENGTH_FLAG = 5;
//const int BTN_ZERO_PREFIX_ONE_LENGTH_FLAG = 6;
//const int BTN_GET_MORE_FLAGS = 7;

// format of expanded index buffer
struct exp_index_buf
{
USHORT exp_length;
ULONG exp_incarnation;
btree_exp exp_nodes[1];
};

const size_t EXP_SIZE = OFFSETA (exp_index_buf*, exp_nodes);

struct dynKey
{
USHORT keyLength;
@@ -87,10 +67,6 @@ namespace BTreeNode {

bool keyEquality(USHORT length, const UCHAR* data, const Ods::IndexNode* indexNode);

UCHAR* nextNode(Ods::IndexNode* node, UCHAR* pointer, btree_exp** expanded_node);

//void quad_put(SLONG value, UCHAR *data);

UCHAR* readJumpInfo(Ods::IndexJumpInfo* jumpInfo, UCHAR* pagePointer);
UCHAR* readJumpNode(Ods::IndexJumpNode* jumpNode, UCHAR* pagePointer);
inline UCHAR* readNode(Ods::IndexNode* indexNode, UCHAR* pagePointer, bool leafNode);
@@ -715,7 +715,6 @@ void BTR_evaluate(thread_db* tdbb, IndexRetrieval* retrieval, RecordBitmap** bit

while (true)
{

if (node.isEndLevel) {
break;
}
@@ -4273,10 +4272,6 @@ static SLONG find_page(btree_page* bucket, const temporary_key* key,
find_record_number = NO_VALUE;
}

// UCHAR* p; // pointer on key
// UCHAR* q; // pointer on processing node
// UCHAR* keyEnd; // pointer on end of key
// UCHAR* nodeEnd; // pointer on end of processing node
USHORT prefix = 0; // last computed prefix against processed node

// pointer where to start reading next node
@@ -4397,8 +4392,7 @@ static SLONG find_page(btree_page* bucket, const temporary_key* key,
}
prefix = p - key->key_data;

// If this is the end of bucket, return node. Somebody else can
// deal with this
// If this is the end of bucket, return node. Somebody else can deal with this.
if (node.isEndBucket) {
return node.pageNumber;
}
@@ -4667,6 +4661,7 @@ static contents garbage_collect(thread_db* tdbb, WIN* window, SLONG parent_numbe
n--;
}
}

while (true)
{
leftPointer = BTreeNode::readNode(&leftNode, leftPointer, leafPage);
@@ -5064,9 +5059,11 @@ static void generate_jump_nodes(thread_db* tdbb, btree_page* page,
while (pointer < endpoint)
{
pointer = BTreeNode::readNode(&node, pointer, leafPage);

if (node.isEndBucket || node.isEndLevel) {
break;
}

if (node.length)
{
UCHAR* q = currentData + node.prefix;
@@ -5986,7 +5983,6 @@ static contents remove_leaf_node(thread_db* tdbb, index_insertion* insertion, WI
ULONG pages = 0;
while (true)
{

// if we find the right one, quit
if (insertion->iib_number == node.recordNumber && !node.isEndBucket && !node.isEndLevel)
{
@@ -29,9 +29,6 @@
#include "../jrd/req.h"
#include "../jrd/exe.h"

struct btree_exp;
struct exp_index_buf;

USHORT BTR_all(Jrd::thread_db*, Jrd::jrd_rel*, Jrd::IndexDescAlloc**, Jrd::RelationPages*);
void BTR_complement_key(Jrd::temporary_key*);
void BTR_create(Jrd::thread_db*, Jrd::jrd_rel*, Jrd::index_desc*, USHORT, Jrd::sort_context*, Jrd::SelectivityList&);
@@ -47,7 +44,6 @@ void BTR_insert(Jrd::thread_db*, Jrd::win*, Jrd::index_insertion*);
Jrd::idx_e BTR_key(Jrd::thread_db*, Jrd::jrd_rel*, Jrd::Record*, Jrd::index_desc*, Jrd::temporary_key*,
Jrd::idx_null_state*, const bool);
USHORT BTR_key_length(Jrd::thread_db*, Jrd::jrd_rel*, Jrd::index_desc*);
UCHAR* BTR_last_node(Ods::btree_page*, exp_index_buf*, btree_exp**);
Ods::btree_page* BTR_left_handoff(Jrd::thread_db*, Jrd::win*, Ods::btree_page*, SSHORT);
USHORT BTR_lookup(Jrd::thread_db*, Jrd::jrd_rel*, USHORT, Jrd::index_desc*, Jrd::RelationPages*);
Jrd::idx_e BTR_make_key(Jrd::thread_db*, USHORT, Jrd::jrd_nod**, Jrd::index_desc*, Jrd::temporary_key*, bool);
@@ -650,7 +650,6 @@ pag* CCH_fake(thread_db* tdbb, WIN* window, SSHORT latch_wait)

MOVE_CLEAR(bdb->bdb_buffer, (SLONG) dbb->dbb_page_size);
window->win_buffer = bdb->bdb_buffer;
window->win_expanded_buffer = NULL;
window->win_bdb = bdb;
window->win_flags = 0;
CCH_MARK(tdbb, window);
@@ -818,18 +817,8 @@ SSHORT CCH_fetch_lock(thread_db* tdbb, WIN* window, USHORT lock_type, SSHORT wai
bdb->bdb_flags |= BDB_writer;
}

// the expanded index buffer is only good when the page is
// fetched for read; if it is ever fetched for write, it must be discarded

if (bdb->bdb_expanded_buffer && (lock_type > LCK_read))
{
delete bdb->bdb_expanded_buffer;
bdb->bdb_expanded_buffer = NULL;
}

window->win_bdb = bdb;
window->win_buffer = bdb->bdb_buffer;
window->win_expanded_buffer = bdb->bdb_expanded_buffer;

// lock_buffer returns 0 or 1 or -1.
const SSHORT lock_result = lock_buffer(tdbb, bdb, wait, page_type);
@@ -1075,8 +1064,6 @@ void CCH_fini(thread_db* tdbb)
for (const bcb_repeat* const end = bcb->bcb_rpt + bcb->bcb_count; tail < end; tail++)
{
BufferDesc* bdb = tail->bcb_bdb;
delete bdb->bdb_expanded_buffer;
bdb->bdb_expanded_buffer = NULL;
PAGE_LOCK_RELEASE(bdb->bdb_lock);
}
}
@@ -1974,11 +1961,6 @@ void CCH_release(thread_db* tdbb, WIN* window, const bool release_tail)

CCH_TRACE(("R %d:%06d", window->win_page.getPageSpaceID(), window->win_page.getPageNum()));

// if an expanded buffer has been created, retain it for possible future use

bdb->bdb_expanded_buffer = window->win_expanded_buffer;
window->win_expanded_buffer = NULL;

// A large sequential scan has requested that the garbage
// collector garbage collect. Mark the buffer so that the
// page isn't released to the LRU tail before the garbage
@@ -3843,11 +3825,6 @@ static BufferDesc* get_buffer(thread_db* tdbb, const PageNumber page, LATCH latc

removeDirty(bcb, bdb);

// if the page has an expanded index buffer, release it

delete bdb->bdb_expanded_buffer;
bdb->bdb_expanded_buffer = NULL;

// Cleanup any residual precedence blocks. Unless something is
// screwed up, the only precedence blocks that can still be hanging
// around are ones cleared at AST level.
@@ -48,8 +48,6 @@ DEFINE_TRACE_ROUTINE(cch_trace);
#define CCH_TRACEE_AST(message) // nothing
#endif

struct exp_index_buf;

namespace Ods {
struct pag;
}
@@ -130,7 +128,6 @@ public:
que bdb_in_use; // queue of buffers in use
que bdb_dirty; // dirty pages LRU queue
Ods::pag* bdb_buffer; // Actual buffer
exp_index_buf* bdb_expanded_buffer; // expanded index buffer
PageNumber bdb_page; // Database page number in buffer
SLONG bdb_incarnation;
ULONG bdb_transactions; // vector of dirty flags to reduce commit overhead
@@ -459,11 +459,9 @@ struct win
{
PageNumber win_page;
Ods::pag* win_buffer;
exp_index_buf* win_expanded_buffer;
class BufferDesc* win_bdb;
SSHORT win_scans;
USHORT win_flags;
// explicit win(SLONG wp) : win_page(wp), win_flags(0) {}
explicit win(const PageNumber& wp)
: win_page(wp), win_bdb(NULL), win_flags(0)
{}