2001-05-23 15:26:42 +02:00
|
|
|
/*
|
|
|
|
* PROGRAM: JRD Sort
|
2003-12-11 11:33:30 +01:00
|
|
|
* MODULE: sort.cpp
|
2001-05-23 15:26:42 +02:00
|
|
|
* DESCRIPTION: Top level sort module
|
|
|
|
*
|
|
|
|
* The contents of this file are subject to the Interbase Public
|
|
|
|
* License Version 1.0 (the "License"); you may not use this file
|
|
|
|
* except in compliance with the License. You may obtain a copy
|
|
|
|
* of the License at http://www.Inprise.com/IPL.html
|
|
|
|
*
|
|
|
|
* Software distributed under the License is distributed on an
|
|
|
|
* "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either express
|
|
|
|
* or implied. See the License for the specific language governing
|
|
|
|
* rights and limitations under the License.
|
|
|
|
*
|
|
|
|
* The Original Code was created by Inprise Corporation
|
|
|
|
* and its predecessors. Portions created by Inprise Corporation are
|
|
|
|
* Copyright (C) Inprise Corporation.
|
|
|
|
*
|
|
|
|
* All Rights Reserved.
|
|
|
|
* Contributor(s): ______________________________________.
|
2002-07-01 17:07:18 +02:00
|
|
|
*
|
|
|
|
* 2001-09-24 SJL - Temporary fix for large sort file bug
|
|
|
|
*
|
2002-10-30 07:40:58 +01:00
|
|
|
* 2002.10.29 Sean Leyne - Removed obsolete "Netware" port
|
|
|
|
*
|
2002-10-31 06:06:02 +01:00
|
|
|
* 2002.10.30 Sean Leyne - Removed support for obsolete "PC_PLATFORM" define
|
|
|
|
*
|
2001-05-23 15:26:42 +02:00
|
|
|
*/
|
|
|
|
|
2001-07-29 19:42:23 +02:00
|
|
|
#include "firebird.h"
|
2001-05-23 15:26:42 +02:00
|
|
|
#include <errno.h>
|
|
|
|
#include <string.h>
|
|
|
|
#include "../jrd/common.h"
|
|
|
|
#include "../jrd/jrd.h"
|
|
|
|
#include "../jrd/sort.h"
|
2003-11-11 13:19:20 +01:00
|
|
|
#include "gen/iberror.h"
|
2001-05-23 15:26:42 +02:00
|
|
|
#include "../jrd/intl.h"
|
|
|
|
#include "../jrd/gdsassert.h"
|
|
|
|
#include "../jrd/rse.h"
|
|
|
|
#include "../jrd/val.h"
|
|
|
|
#include "../jrd/err_proto.h"
|
|
|
|
#include "../jrd/gds_proto.h"
|
|
|
|
#include "../jrd/sort_proto.h"
|
2004-05-18 00:30:09 +02:00
|
|
|
#include "../jrd/thread_proto.h"
|
2001-05-23 15:26:42 +02:00
|
|
|
|
2001-07-12 07:46:06 +02:00
|
|
|
#ifdef HAVE_SYS_TYPES_H
|
|
|
|
#include <sys/types.h>
|
|
|
|
#endif
|
|
|
|
|
2006-07-25 15:03:11 +02:00
|
|
|
#ifdef HAVE_LIMITS_H
|
|
|
|
#include <limits.h> /* On some systems for ULONG_MAX */
|
|
|
|
#endif
|
|
|
|
|
2001-07-12 07:46:06 +02:00
|
|
|
#ifdef HAVE_SYS_UIO_H
|
|
|
|
#include <sys/uio.h>
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef HAVE_UNISTD_H
|
|
|
|
#include <unistd.h>
|
|
|
|
#endif
|
|
|
|
|
2002-07-06 07:32:02 +02:00
|
|
|
#ifdef HAVE_STDIO_H
|
|
|
|
#include <stdio.h>
|
|
|
|
#endif
|
|
|
|
|
2002-04-29 13:22:26 +02:00
|
|
|
#ifdef WIN_NT
|
2001-05-23 15:26:42 +02:00
|
|
|
/* for SEEK_SET */
|
2003-11-16 13:23:24 +01:00
|
|
|
#include <io.h> // lseek, read, write, close
|
2001-05-23 15:26:42 +02:00
|
|
|
#endif
|
|
|
|
|
2004-05-07 00:11:24 +02:00
|
|
|
const ULONG IO_RETRY = 20;
|
|
|
|
const USHORT RUN_GROUP = 8;
|
|
|
|
const USHORT MAX_MERGE_LEVEL = 2;
|
2001-05-23 15:26:42 +02:00
|
|
|
|
2004-03-20 15:57:40 +01:00
|
|
|
using namespace Jrd;
|
|
|
|
|
2003-09-29 13:00:38 +02:00
|
|
|
// The sort buffer size should be just under a multiple of the
|
|
|
|
// hardware memory page size to account for memory allocator
|
|
|
|
// overhead. On most platforms, this saves 4KB to 8KB per sort
|
|
|
|
// buffer from being allocated but not used.
|
2001-05-23 15:26:42 +02:00
|
|
|
|
2004-05-07 00:11:24 +02:00
|
|
|
const ULONG SORT_BUFFER_CHUNK_SIZE = 4096;
|
|
|
|
const ULONG MIN_SORT_BUFFER_SIZE = SORT_BUFFER_CHUNK_SIZE * 4;
|
|
|
|
const ULONG MAX_SORT_BUFFER_SIZE = SORT_BUFFER_CHUNK_SIZE * 32;
|
2002-04-29 13:22:26 +02:00
|
|
|
|
2007-03-25 18:09:00 +02:00
|
|
|
// the size of sr_bckptr (everything before sort_record) in bytes
|
|
|
|
#define SIZEOF_SR_BCKPTR OFFSET(sr*, sr_sort_record)
|
|
|
|
// the size of sr_bckptr in # of 32 bit longwords
|
2007-04-01 16:52:25 +02:00
|
|
|
#define SIZEOF_SR_BCKPTR_IN_LONGS static_cast<signed>(SIZEOF_SR_BCKPTR / sizeof(SLONG))
|
2007-03-25 18:09:00 +02:00
|
|
|
// offset in array of pointers to back record pointer (sr_bckptr)
|
2007-04-01 16:52:25 +02:00
|
|
|
#define BACK_OFFSET (-static_cast<signed>(SIZEOF_SR_BCKPTR / sizeof(SLONG*)))
|
2007-03-25 18:09:00 +02:00
|
|
|
|
2008-01-16 10:48:41 +01:00
|
|
|
//#define DIFF_LONGS(a, b) ((a) - (b))
|
2004-11-24 10:22:07 +01:00
|
|
|
#define SWAP_LONGS(a, b, t) {t = a; a = b; b = t;}
|
2001-05-23 15:26:42 +02:00
|
|
|
|
2003-09-29 13:00:38 +02:00
|
|
|
// Compare p and q both SORTP pointers for l 32-bit longwords
|
|
|
|
// l != 0 if p and q are not equal for l bytes
|
2001-05-23 15:26:42 +02:00
|
|
|
#define DO_32_COMPARE(p, q, l) do if (*p++ != *q++) break; while (--l);
|
|
|
|
|
2004-11-24 10:22:07 +01:00
|
|
|
#define MOVE_32(len, from, to) memcpy(to, from, len * 4)
|
2001-05-23 15:26:42 +02:00
|
|
|
|
2005-05-28 00:45:31 +02:00
|
|
|
// These values are not defined as const as they are passed to
|
2003-09-29 13:00:38 +02:00
|
|
|
// the diddle_key routines which mangles them.
|
|
|
|
// As the diddle_key routines differ on VAX (little endian) and non VAX
|
2005-05-28 00:45:31 +02:00
|
|
|
// (big endian) platforms, making the following const caused a core on the
|
2003-09-29 13:00:38 +02:00
|
|
|
// Intel Platforms, while Solaris was working fine.
|
|
|
|
|
2002-07-05 17:00:26 +02:00
|
|
|
static ULONG low_key[] = { 0, 0, 0, 0, 0, 0 };
|
2005-05-28 00:45:31 +02:00
|
|
|
|
2002-07-01 17:07:18 +02:00
|
|
|
static ULONG high_key[] = {
|
2001-12-24 03:51:06 +01:00
|
|
|
ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX,
|
|
|
|
ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX,
|
|
|
|
ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX,
|
|
|
|
ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX,
|
|
|
|
ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX,
|
|
|
|
ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX,
|
2002-04-29 13:22:26 +02:00
|
|
|
ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX,
|
|
|
|
ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX, ULONG_MAX};
|
2001-05-23 15:26:42 +02:00
|
|
|
|
|
|
|
#ifdef SCROLLABLE_CURSORS
|
2004-03-28 11:10:30 +02:00
|
|
|
static sort_record* get_merge(merge_control*, sort_context*, RSE_GET_MODE);
|
2001-05-23 15:26:42 +02:00
|
|
|
#else
|
2008-01-16 10:48:41 +01:00
|
|
|
static void diddle_key(UCHAR*, sort_context*, bool);
|
2004-03-28 11:10:30 +02:00
|
|
|
static sort_record* get_merge(merge_control*, sort_context*);
|
2001-05-23 15:26:42 +02:00
|
|
|
#endif
|
|
|
|
|
2007-04-03 17:01:37 +02:00
|
|
|
static ULONG allocate_memory(sort_context*, ULONG, ULONG, bool);
|
2004-03-19 07:14:53 +01:00
|
|
|
static void error_memory(sort_context*);
|
2007-11-17 01:38:16 +01:00
|
|
|
static inline FB_UINT64 find_file_space(sort_context*, ULONG);
|
2007-11-12 15:26:44 +01:00
|
|
|
static inline void free_file_space(sort_context*, FB_UINT64, ULONG);
|
2004-03-19 07:14:53 +01:00
|
|
|
static void init(sort_context*);
|
|
|
|
static bool local_fini(sort_context*, Attachment*);
|
|
|
|
static void merge_runs(sort_context*, USHORT);
|
2004-03-28 11:10:30 +02:00
|
|
|
static void quick(SLONG, SORTP **, ULONG);
|
2004-03-19 07:14:53 +01:00
|
|
|
static ULONG order(sort_context*);
|
2007-04-03 17:01:37 +02:00
|
|
|
static void order_and_save(sort_context*);
|
2004-03-19 07:14:53 +01:00
|
|
|
static void put_run(sort_context*);
|
|
|
|
static void sort(sort_context*);
|
2007-04-03 17:01:37 +02:00
|
|
|
static void sort_runs_by_seek(sort_context*, int);
|
2003-03-03 10:22:32 +01:00
|
|
|
#ifdef NOT_USED_OR_REPLACED
|
2001-05-23 15:26:42 +02:00
|
|
|
#ifdef DEBUG
|
2004-03-19 07:14:53 +01:00
|
|
|
static void validate(sort_context*);
|
2001-05-23 15:26:42 +02:00
|
|
|
#endif
|
2003-03-03 10:22:32 +01:00
|
|
|
#endif
|
2001-05-23 15:26:42 +02:00
|
|
|
|
2007-04-03 17:01:37 +02:00
|
|
|
#ifdef DEV_BUILD
|
2007-04-05 11:13:10 +02:00
|
|
|
static void check_file(const sort_context*, const run_control*);
|
2007-04-03 17:01:37 +02:00
|
|
|
#define CHECK_FILE(a) check_file((a), NULL);
|
|
|
|
#define CHECK_FILE2(a, b) check_file((a), (b));
|
|
|
|
#else
|
|
|
|
#define CHECK_FILE(a)
|
|
|
|
#define CHECK_FILE2(a, b)
|
|
|
|
#endif
|
|
|
|
|
2006-06-02 06:23:41 +02:00
|
|
|
static const char* SCRATCH = "fb_sort_";
|
2001-05-23 15:26:42 +02:00
|
|
|
|
|
|
|
#ifdef SCROLLABLE_CURSORS
|
2002-09-17 07:58:40 +02:00
|
|
|
#ifdef WORDS_BIGENDIAN
|
2004-03-19 07:14:53 +01:00
|
|
|
void SORT_diddle_key(UCHAR* record, sort_context* scb, bool direction)
{
/**************************************
 *
 *      S O R T _ d i d d l e _ k e y ( n o n - V A X )
 *
 **************************************
 *
 * Functional description
 *      Perform transformation between the natural form of a record
 *      and a form that can be sorted in unsigned comparison order.
 *
 *      direction - true for SORT_put() and false for SORT_get()
 *
 *      Big-endian variant: multi-byte integers already collate correctly
 *      as unsigned byte strings, so only sign bits, descending-order
 *      complements and variable-length control info need adjusting.
 *
 **************************************/
	// Walk every key segment described in the sort context.
	const sort_key_def* key = scb->scb_description;
	for (const sort_key_def* const end = key + scb->scb_keys; key < end; key++)
	{
		UCHAR* p = record + key->skd_offset;
		USHORT n = key->skd_length;
		// Descending keys are stored one's-complemented so that an
		// unsigned ascending comparison yields descending order.
		bool complement = key->skd_flags & SKD_descending;

		switch (key->skd_dtype) {
		// Unsigned fixed-width types already compare correctly.
		case SKD_ulong:
		case SKD_ushort:
		case SKD_bytes:
			break;

		// Stash embedded control info for non-fixed data types in the sort
		// record and zap it so that it doesn't interfere with collation

		case SKD_varying:
			if (direction) {
				USHORT& vlen = ((vary*) p)->vary_length;
				if (!(scb->scb_flags & scb_sorted))
				{
					// Save the real length outside the collated area,
					// then pad the tail so garbage bytes don't collate.
					*((USHORT *) (record + key->skd_vary_offset)) = vlen;
					const UCHAR fill_char =
						(key->skd_flags & SKD_binary) ? 0 : ASCII_SPACE;
					UCHAR* fill_pos = p + sizeof(USHORT) + vlen;
					const USHORT fill = n - sizeof(USHORT) - vlen;
					if (fill)
						memset(fill_pos, fill_char, fill);
				}
				// Zero the in-place length so it doesn't affect ordering.
				vlen = 0;
			}
			break;

		case SKD_cstring:
			if (direction) {
				const UCHAR fill_char = (key->skd_flags & SKD_binary) ? 0 : ASCII_SPACE;
				if (!(scb->scb_flags & scb_sorted)) {
					// Record the string length, then pad past the NUL.
					const USHORT l = strlen(reinterpret_cast<char*>(p));
					*((USHORT *) (record + key->skd_vary_offset)) = l;
					UCHAR* fill_pos = p + l;
					const USHORT fill = n - l;
					if (fill)
						memset(fill_pos, fill_char, fill);
				}
				else {
					// Already sorted once: just overwrite the terminator.
					const USHORT l = *((USHORT *) (record + key->skd_vary_offset));
					*(p + l) = fill_char;
				}
			}
			break;

		case SKD_text:
			break;

		case SKD_float:
		case SKD_double:
			{
				// Sign-bit trick for floats: positive values get the sign
				// bit set; negative values are fully complemented (handled
				// by flipping 'complement' below) so byte order matches
				// numeric order.
				const USHORT flag = (direction || !complement) ? (direction ? TRUE : FALSE) : TRUE;
				if (flag ^ (*p >> 7))
					*p ^= 1 << 7;
				else
					complement = !complement;
				break;
			}

		// Signed integral types: flip the sign bit so unsigned
		// comparison orders negatives before positives.
		case SKD_long:
		case SKD_short:
		case SKD_quad:
		case SKD_timestamp1:
		case SKD_timestamp2:
		case SKD_sql_time:
		case SKD_sql_date:
		case SKD_int64:
			*p ^= 1 << 7;
			break;

		default:
			fb_assert(false);
			break;
		}
		// Apply (or undo) the descending-order one's complement.
		// Note: the transformation is its own inverse.
		if (complement && n)
			do {
				*p++ ^= -1;
			} while (--n);

		// Flatten (restore) but don't complement control info for non-fixed
		// data types when restoring the data

		if (key->skd_dtype == SKD_varying && !direction) {
			p = record + key->skd_offset;
			((vary*) p)->vary_length = *((USHORT *) (record + key->skd_vary_offset));
		}

		if (key->skd_dtype == SKD_cstring && !direction) {
			p = record + key->skd_offset;
			const USHORT l = *((USHORT *) (record + key->skd_vary_offset));
			*(p + l) = 0;
		}
	}
}
|
|
|
|
|
|
|
|
|
|
|
|
#else
|
2004-03-19 07:14:53 +01:00
|
|
|
void SORT_diddle_key(UCHAR* record, sort_context* scb, bool direction)
{
/**************************************
 *
 *      S O R T _ d i d d l e _ k e y ( V A X )
 *
 **************************************
 *
 * Functional description
 *      Perform transformation between the natural form of a record
 *      and a form that can be sorted in unsigned comparison order.
 *
 *      direction - true for SORT_put() and false for SORT_get()
 *
 *      Little-endian variant: in addition to sign-bit flips and
 *      descending-order complements, bytes within each longword must be
 *      swapped so that 32-bit unsigned comparisons (DO_32_COMPARE) see
 *      big-endian byte order.
 *
 **************************************/
	UCHAR c1, fill_char, *fill_pos;
	USHORT fill;
	SSHORT longs, flag;
	ULONG lw;

	const sort_key_def* key = scb->scb_description;
	for (const sort_key_def* const end = key + scb->scb_keys; key < end; key++)
	{
		BLOB_PTR* p = (BLOB_PTR *) record + key->skd_offset;
		USHORT* wp = (USHORT *) p;
		SORTP* lwp = (SORTP *) p;
		// Descending keys are one's-complemented below so ascending
		// unsigned comparison yields descending order.
		bool complement = key->skd_flags & SKD_descending;
		// Key segments are processed in whole longwords.
		USHORT n = ROUNDUP(key->skd_length, sizeof(SLONG));

		switch (key->skd_dtype) {
		// 32-bit signed types: flip the sign bit (byte 3 holds the
		// most significant byte's sign on little-endian).
		case SKD_timestamp1:
		case SKD_timestamp2:
		case SKD_sql_date:
		case SKD_sql_time:
			p[3] ^= 1 << 7;
			break;

		case SKD_ulong:
		case SKD_ushort:
			break;

		case SKD_text:
		case SKD_bytes:
		case SKD_cstring:
		case SKD_varying:

			// Stash embedded control info for non-fixed data types in the sort
			// record and zap it so that it doesn't interfere with collation

			if (key->skd_dtype == SKD_varying && direction) {
				USHORT& vlen = ((vary*) p)->vary_length;
				if (!(scb->scb_flags & scb_sorted)) {
					// Save the real length outside the collated area,
					// then pad the tail so garbage bytes don't collate.
					*((USHORT*) (record + key->skd_vary_offset)) = vlen;
					fill_char =
						(key->skd_flags & SKD_binary) ? 0 : ASCII_SPACE;
					fill_pos = p + sizeof(USHORT) + vlen;
					fill = n - sizeof(USHORT) - vlen;
					if (fill)
						memset(fill_pos, fill_char, fill);
				}
				vlen = 0;
			}

			if (key->skd_dtype == SKD_cstring && direction) {
				fill_char = (key->skd_flags & SKD_binary) ? 0 : ASCII_SPACE;
				if (!(scb->scb_flags & scb_sorted)) {
					const USHORT l = strlen(reinterpret_cast<char*>(p));
					*((USHORT*) (record + key->skd_vary_offset)) = l;
					fill_pos = p + l;
					fill = n - l;
					if (fill)
						memset(fill_pos, fill_char, fill);
				}
				else {
					USHORT l = *((USHORT*) (record + key->skd_vary_offset));
					*(p + l) = fill_char;
				}
			}

			// Reverse the bytes of each longword (0123 -> 3210) so the
			// text collates correctly under 32-bit unsigned comparison.
			// The transformation is symmetric, so it also restores.
			longs = n >> SHIFTLONG;
			while (--longs >= 0) {
				c1 = p[3];
				p[3] = *p;
				*p++ = c1;
				c1 = p[1];
				p[1] = *p;
				*p = c1;
				p += 3;
			}
			p = (BLOB_PTR*) wp;
			break;

		case SKD_short:
			p[1] ^= 1 << 7;
			break;

		case SKD_long:
			p[3] ^= 1 << 7;
			break;

		case SKD_quad:
			p[7] ^= 1 << 7;
			break;

		case SKD_int64:

			// INT64's fit in TWO LONGS, and hence the SWAP has to happen
			// here for the right order comparison using DO_32_COMPARE

			if (!direction)
				SWAP_LONGS(lwp[0], lwp[1], lw);

			p[7] ^= 1 << 7;

			if (direction)
				SWAP_LONGS(lwp[0], lwp[1], lw);
			break;

#ifdef IEEE
		case SKD_double:
			// Swap longword halves so the sign/exponent half compares
			// first, then apply the IEEE sign-bit trick (see SKD_float).
			if (!direction) {
				lw = lwp[0];
				lwp[0] = lwp[1];
				lwp[1] = lw;
			}
			flag = (direction || !complement) ? direction : TRUE;
			if (flag ^ (p[7] >> 7))
				p[7] ^= 1 << 7;
			else
				complement = !complement;
			if (direction) {
				lw = lwp[0];
				lwp[0] = lwp[1];
				lwp[1] = lw;
			}
			break;

		case SKD_float:
			// Positive values get the sign bit set; negative values are
			// fully complemented (by toggling 'complement') so byte order
			// matches numeric order.
			flag = (direction || !complement) ? direction : TRUE;
			if (flag ^ (p[3] >> 7))
				p[3] ^= 1 << 7;
			else
				complement = !complement;
			break;

#else // IEEE
		case SKD_double:
			// NOTE(review): 'w' is used here but never declared in this
			// function; this non-IEEE branch appears unable to compile as
			// written -- verify on a non-IEEE platform build.
			w = wp[2];
			wp[2] = wp[3];
			wp[3] = w;

		// Intentional fallthrough: double shares the float handling
		// after its extra word swap above.
		case SKD_d_float:
		case SKD_float:
			if (!direction)
			{
				if (complement)
				{
					if (p[3] & 1 << 7)
						complement = !complement;
					else
						p[3] ^= 1 << 7;
				}
				else
				{
					if (p[3] & 1 << 7)
						p[3] ^= 1 << 7;
					else
						complement = !complement;
				}
			}
			w = wp[0];
			wp[0] = wp[1];
			wp[1] = w;
			if (direction)
			{
				if (p[3] & 1 << 7)
					complement = !complement;
				else
					p[3] ^= 1 << 7;
			}
			break;
#endif // IEEE

		default:
			fb_assert(false);
			break;
		}

		// Apply (or undo) the descending-order one's complement.
		if (complement && n)
		{
			do {
				*p++ ^= -1;
			} while (--n);
		}

		// Flatten (restore) but don't complement control info for non-fixed
		// data types when restoring the data

		if (key->skd_dtype == SKD_varying && !direction) {
			p = (BLOB_PTR *) record + key->skd_offset;
			((vary*) p)->vary_length = *((USHORT *) (record + key->skd_vary_offset));
		}

		if (key->skd_dtype == SKD_cstring && !direction) {
			p = (BLOB_PTR *) record + key->skd_offset;
			USHORT l = *((USHORT *) (record + key->skd_vary_offset));
			*(p + l) = 0;
		}
	}
}
|
|
|
|
#endif
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
2004-03-19 07:14:53 +01:00
|
|
|
void SORT_fini(sort_context* scb, Attachment* att)
{
/**************************************
 *
 *      S O R T _ f i n i
 *
 **************************************
 *
 * Functional description
 *      Finish sort, and release all resources.
 *      A null context is tolerated; the context block is deleted
 *      only when local_fini reports the cleanup succeeded.
 *
 **************************************/
	if (!scb)
		return;

	if (local_fini(scb, att))
		delete scb;
}
|
|
|
|
|
|
|
|
|
|
|
|
#ifdef SCROLLABLE_CURSORS
|
2006-10-07 12:53:01 +02:00
|
|
|
void SORT_get(thread_db* tdbb,
			  sort_context* scb,
			  ULONG ** record_address,
			  RSE_GET_MODE mode)
{
/**************************************
 *
 *      S O R T _ g e t ( I B _ V 4 _ 1 )
 *
 **************************************
 *
 * Functional description
 *      Get a record from sort (in order, of course).
 *      The address of the record is returned in <record_address>
 *      If the stream is exhausted, SORT_get puts NULL in <record_address>.
 *
 *      Scrollable-cursor variant: 'mode' selects forward or backward
 *      traversal of the sorted pointer array.
 *
 **************************************/
	sort_record* record;

	scb->scb_status_vector = tdbb->tdbb_status_vector;

	// If there were runs, get the records from the merge
	// tree. Otherwise everything fit in memory.

	if (scb->scb_merge)
		record = get_merge(scb->scb_merge, scb, mode);
	else
		switch (mode) {
		case RSE_get_forward:
			if (scb->scb_flags & scb_initialized)
				scb->scb_flags &= ~scb_initialized;

			// Scan forward, skipping NULL slots (eliminated duplicates),
			// until a record is found or the array is exhausted.
			while (true) {
				if (scb->scb_next_pointer > scb->scb_last_pointer) {
					record = NULL;
					break;
				}
				if (record = *scb->scb_next_pointer++)
					break;
			}
			break;

		case RSE_get_backward:
			if (scb->scb_flags & scb_initialized) {
				// First backward fetch: start just past the last record.
				scb->scb_flags &= ~scb_initialized;
				scb->scb_next_pointer = scb->scb_last_pointer + 1;
			}
			else {
				// By definition, the next pointer is on the next record,
				// so we have to go back one to get to the last fetched record.
				// This is easier than changing the sense of the next pointer.

				scb->scb_next_pointer--;
				if (scb->scb_next_pointer <= scb->scb_first_pointer + 1) {
					record = NULL;
					scb->scb_next_pointer++;
					break;
				}
			}

			// Scan backward, skipping NULL slots, until a record is
			// found or the start of the array is reached.
			while (true) {
				scb->scb_next_pointer--;
				if (scb->scb_next_pointer <= scb->scb_first_pointer) {
					record = NULL;
					scb->scb_next_pointer++;
					break;
				}
				if (record = *scb->scb_next_pointer)
					break;
			}

			// Reset next pointer to one greater than the last fetched

			scb->scb_next_pointer++;
			break;

		default:
			fb_assert(FALSE);
			break;
		}

	// Restore the key to its natural (un-diddled) form before
	// handing the record back to the caller.
	if (record)
		SORT_diddle_key((UCHAR*) record->sort_record_key, scb, false);

	*record_address = (ULONG *) record;

	tdbb->bumpStats(RuntimeStatistics::SORT_GETS);
}
|
|
|
|
#else
|
2006-10-07 12:53:01 +02:00
|
|
|
void SORT_get(thread_db* tdbb,
			  sort_context* scb,
			  ULONG** record_address)
{
/**************************************
 *
 *      S O R T _ g e t
 *
 **************************************
 *
 * Functional description
 *      Get a record from sort (in order, of course).
 *      The address of the record is returned in <record_address>
 *      If the stream is exhausted, SORT_get puts NULL in <record_address>.
 *
 **************************************/
	sort_record* record;

	scb->scb_status_vector = tdbb->tdbb_status_vector;

	// If there weren't any runs, everything fit in memory. Just return stuff.

	if (!scb->scb_merge)
		// Walk the sorted pointer array, skipping NULL slots left by
		// duplicate elimination, until a record is found or the
		// remaining record count hits zero.
		while (true) {
			if (scb->scb_records == 0) {
				record = NULL;
				break;
			}
			scb->scb_records--;
			if ( (record = *scb->scb_next_pointer++) )
				break;
		}
	else
		record = get_merge(scb->scb_merge, scb);

	*record_address = (ULONG *) record;

	// Restore the key to its natural (un-diddled) form before
	// handing the record back to the caller.
	if (record) {
		diddle_key((UCHAR*) record->sort_record_key, scb, false);
	}

	tdbb->bumpStats(RuntimeStatistics::SORT_GETS);
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
2006-05-31 10:53:00 +02:00
|
|
|
sort_context* SORT_init(thread_db* tdbb,
						USHORT record_length,
						USHORT keys,
						USHORT unique_keys,
						const sort_key_def* key_description,
						FPTR_REJECT_DUP_CALLBACK call_back,
						void* user_arg,
						FB_UINT64 max_records)
{
/**************************************
 *
 *      S O R T _ i n i t
 *
 **************************************
 *
 * Functional description
 *      Initialize for a sort. All we really need is a description
 *      of the sort keys. Return the address of a sort context block.
 *      If duplicate control is required, the user may specify a call
 *      back routine. If supplied, the call back routine is called
 *      with three argument: the two records and the user supplied
 *      argument. If the call back routine returns TRUE, the second
 *      duplicate record is eliminated.
 *
 * hvlad: when duplicates are eliminating only first unique_keys will be
 *      compared. This is used at creation of unique index since sort key
 *      includes index key (which must be unique) and record numbers
 *
 *      On any allocation failure the partially built context is freed
 *      and the error is reported via ERR_punt(); NULL is never actually
 *      returned (the trailing return satisfies the compiler).
 *
 **************************************/
	SET_TDBB(tdbb);

	MemoryPool* const pool = tdbb->getDatabase()->dbb_permanent;
	ISC_STATUS* status_vector = tdbb->tdbb_status_vector;
	sort_context* scb = NULL;

	try {

	// Allocate and setup a sort context block, including copying the
	// key description vector. Round the record length up to the next
	// longword, and add a longword to a pointer back to the pointer slot.

	scb = (sort_context*) pool->allocate(SCB_LEN(keys));
	memset(scb, 0, SCB_LEN(keys));

	scb->scb_pool = pool;
	scb->scb_status_vector = status_vector;
	//scb->scb_length = record_length;
	scb->scb_longs =
		ROUNDUP(record_length + SIZEOF_SR_BCKPTR, ALIGNMENT) >> SHIFTLONG;
	scb->scb_dup_callback = call_back;
	scb->scb_dup_callback_arg = user_arg;
	scb->scb_keys = keys;
	//scb->scb_max_records = max_records;

	// Copy the caller's key descriptions into the context block.
	fb_assert(unique_keys <= keys);
	sort_key_def* p = scb->scb_description;
	const sort_key_def* q = key_description;
	do {
		*p++ = *q++;
	} while (--keys);

	// Full key length (in longwords) spans through the last key segment.
	--p;
	scb->scb_key_length =
		ROUNDUP(p->skd_offset + p->skd_length, sizeof(SLONG)) >> SHIFTLONG;

	// Unique-comparison length covers only the first unique_keys segments
	// (used for unique-index creation; see note above).
	while (unique_keys < scb->scb_keys) {
		p--;
		unique_keys++;
	}
	scb->scb_unique_length =
		ROUNDUP(p->skd_offset + p->skd_length, sizeof(SLONG)) >> SHIFTLONG;

	// Next, try to allocate a "big block". How big? Big enough!
#ifdef DEBUG_MERGE
	// To debug the merge algorithm, force the in-memory pool to be VERY small
	scb->scb_size_memory = 2000;
	scb->scb_memory =
		(SORTP *) scb->scb_pool->allocate(scb->scb_size_memory);
#else
	// Try to get a big chunk of memory, if we can't try smaller and
	// smaller chunks until we can get the memory. If we get down to
	// too small a chunk - punt and report not enough memory.

	for (scb->scb_size_memory = MAX_SORT_BUFFER_SIZE;
		scb->scb_size_memory >= MIN_SORT_BUFFER_SIZE;
		scb->scb_size_memory -= SORT_BUFFER_CHUNK_SIZE)
	{
		try {
			scb->scb_memory =
				(SORTP *) scb->scb_pool->allocate(scb->scb_size_memory);
			break;
		}
		catch (const Firebird::BadAlloc&) {
			// not enough memory, let's allocate smaller buffer
		}
	}

	if (scb->scb_size_memory < MIN_SORT_BUFFER_SIZE)
		Firebird::BadAlloc::raise();
#endif // DEBUG_MERGE

	scb->scb_end_memory =
		(SORTP *) ((BLOB_PTR *) scb->scb_memory + scb->scb_size_memory);
	scb->scb_first_pointer = (sort_record**) scb->scb_memory;

	// Set up the temp space

	scb->scb_space = FB_NEW(*pool) TempSpace(*pool, SCRATCH);

	// Set up to receive the first record

	init(scb);

	// If a linked list pointer was given, link in new sort block

	Attachment* att = tdbb->getAttachment();
	if (att) {
		scb->scb_next = att->att_active_sorts;
		att->att_active_sorts = scb;
		scb->scb_attachment = att;
	}

	return scb;

	}
	catch (const Firebird::BadAlloc&) {
		// Report the allocation failure through the status vector and
		// release whatever was built so far, then punt to the caller.
		*status_vector++ = isc_arg_gds;
		*status_vector++ = isc_sort_mem_err;
		*status_vector = isc_arg_end;
		delete scb;
		ERR_punt();
	}

	// Unreachable: ERR_punt() does not return. Keeps compilers happy.
	return NULL;
}
|
|
|
|
|
|
|
|
|
2006-10-07 12:53:01 +02:00
|
|
|
void SORT_put(thread_db* tdbb, sort_context* scb, ULONG ** record_address)
{
/**************************************
 *
 *      S O R T _ p u t
 *
 **************************************
 *
 * Functional description
 *	Allocate space for a record for sort.  The caller is responsible
 *	for moving in the record.
 *
 *	Records are added from the top (higher addresses) of sort memory going down. Record
 *	pointers are added at the bottom (lower addresses) of sort memory going up. When
 *	they overlap, the records in memory are sorted and written to a "run"
 *	in the scratch files. The runs are eventually merged.
 *
 **************************************/
	// Keep the sort context pointing at the current thread's status vector
	// so lower layers can report errors through it.
	scb->scb_status_vector = tdbb->tdbb_status_vector;

	// Find the last record passed in, and zap the keys something comparable
	// by unsigned longword compares

	SR* record = scb->scb_last_record;

	// scb_last_record == scb_end_memory means no record has been stored yet,
	// so there is nothing to diddle.
	if (record != (SR *) scb->scb_end_memory)
	{
#ifdef SCROLLABLE_CURSORS
		SORT_diddle_key((UCHAR*) (record->sr_sort_record.sort_record_key),
						scb, true);
#else
		diddle_key((UCHAR*) (record->sr_sort_record.sort_record_key), scb,
				   true);
#endif
	}

	// If there isn't room for the record, sort and write the run.
	// Check that we are not at the beginning of the buffer in addition
	// to checking for space for the record. This avoids the pointer
	// record from underflowing in the second condition.
	if ((BLOB_PTR *) record < (BLOB_PTR *) (scb->scb_memory + scb->scb_longs)
		|| (BLOB_PTR *) NEXT_RECORD(record) <= (BLOB_PTR *) (scb->scb_next_pointer + 1))
	{
		put_run(scb);
		// Collapse same-depth runs into a merged run whenever RUN_GROUP or
		// more of them accumulate, until the maximum merge depth is reached.
		while (true) {
			run_control* run = scb->scb_runs;
			const USHORT depth = run->run_depth;
			if (depth == MAX_MERGE_LEVEL)
				break;
			// Count consecutive runs at the head of the list with this depth.
			USHORT count = 1;
			while ((run = run->run_next) && run->run_depth == depth)
				count++;
			if (count < RUN_GROUP)
				break;
			merge_runs(scb, count);
		}
		// Reset the in-memory arena and start filling it again.
		init(scb);
		record = scb->scb_last_record;
	}

	record = NEXT_RECORD(record);

	// Make sure the first longword of the record points to the pointer
	scb->scb_last_record = record;
	record->sr_bckptr = scb->scb_next_pointer;

	// Move key_id into *scb->scb_next_pointer and then
	// increment scb->scb_next_pointer
	*scb->scb_next_pointer++ =
		reinterpret_cast<sort_record*>(record->sr_sort_record.sort_record_key);
#ifndef SCROLLABLE_CURSORS
	scb->scb_records++;
#endif
	// Hand the key buffer back to the caller, who copies the record into it.
	*record_address = (ULONG *) record->sr_sort_record.sort_record_key;

	tdbb->bumpStats(RuntimeStatistics::SORT_PUTS);
}
|
|
|
|
|
|
|
|
|
|
|
|
#ifdef SCROLLABLE_CURSORS
|
|
|
|
void SORT_read_block(
|
|
|
|
#else
|
2007-11-12 15:26:44 +01:00
|
|
|
FB_UINT64 SORT_read_block(
|
2001-05-23 15:26:42 +02:00
|
|
|
#endif
|
2006-06-05 16:39:33 +02:00
|
|
|
ISC_STATUS* status_vector,
|
2007-04-03 17:01:37 +02:00
|
|
|
TempSpace* tmp_space,
|
2007-11-12 15:26:44 +01:00
|
|
|
FB_UINT64 seek,
|
2006-06-05 16:39:33 +02:00
|
|
|
BLOB_PTR* address,
|
|
|
|
ULONG length)
|
2001-05-23 15:26:42 +02:00
|
|
|
{
|
|
|
|
/**************************************
|
|
|
|
*
|
|
|
|
* S O R T _ r e a d _ b l o c k
|
|
|
|
*
|
|
|
|
**************************************
|
|
|
|
*
|
|
|
|
* Functional description
|
|
|
|
* Read a block of stuff from a scratch file.
|
|
|
|
*
|
|
|
|
**************************************/
|
2006-05-31 10:53:00 +02:00
|
|
|
try {
|
2007-04-03 17:01:37 +02:00
|
|
|
const size_t bytes = tmp_space->read(seek, address, length);
|
2006-05-31 10:53:00 +02:00
|
|
|
fb_assert(bytes == length);
|
|
|
|
seek += bytes;
|
|
|
|
}
|
|
|
|
catch (const Firebird::status_exception& ex) {
|
|
|
|
Firebird::stuff_exception(status_vector, ex);
|
2006-06-18 12:43:55 +02:00
|
|
|
ERR_post(isc_sort_err, 0);
|
2001-05-23 15:26:42 +02:00
|
|
|
}
|
|
|
|
#ifndef SCROLLABLE_CURSORS
|
|
|
|
return seek;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2004-03-18 06:56:06 +01:00
|
|
|
void SORT_shutdown(Attachment* att)
{
/**************************************
 *
 *      S O R T _ s h u t d o w n
 *
 **************************************
 *
 * Functional description
 *	Clean up any pending sorts.
 *
 **************************************/

	// We ignore the result from local_fini, since the expectation is that,
	// from the way we are passing in the structures, every sort_context *IS*
	// part of the ptr chain. Also, we're not freeing the structure here, so
	// if something goes wrong, it's not *CRITICAL*. -- mrs

	// Each local_fini call is expected to unlink the head of the list,
	// so re-read att_active_sorts every iteration until it drains.
	for (;;)
	{
		sort_context* const active = att->att_active_sorts;
		if (!active)
			break;
		local_fini(active, att);
	}
}
|
|
|
|
|
|
|
|
|
2006-10-07 12:53:01 +02:00
|
|
|
void SORT_sort(thread_db* tdbb, sort_context* scb)
{
/**************************************
 *
 *      S O R T _ s o r t
 *
 **************************************
 *
 * Functional description
 *	Perform any intermediate computing before giving records
 *	back.  If there weren't any runs, run sort the buffer.
 *	If there were runs, sort and write out the last run_control and
 *	build a merge tree.
 *
 **************************************/
	run_control* run;
	merge_control* merge;
	merge_control* merge_pool;

	scb->scb_status_vector = tdbb->tdbb_status_vector;

	try {

	// Transform the last stored record's key into unsigned-comparable form
	// (records stored earlier were diddled in SORT_put).
	if (scb->scb_last_record != (SR *) scb->scb_end_memory)
	{
#ifdef SCROLLABLE_CURSORS
		SORT_diddle_key((UCHAR*) KEYOF(scb->scb_last_record), scb, true);
#else
		diddle_key((UCHAR*) KEYOF(scb->scb_last_record), scb, true);
#endif
	}

	// If there aren't any runs, things fit nicely in memory. Just sort the mess
	// and we're ready for output.
	if (!scb->scb_runs) {
		sort(scb);
#ifdef SCROLLABLE_CURSORS
		scb->scb_last_pointer = scb->scb_next_pointer - 1;
#endif
		scb->scb_next_pointer = scb->scb_first_pointer + 1;
#ifdef SCROLLABLE_CURSORS
		scb->scb_flags |= scb_initialized;
#endif
		scb->scb_flags |= scb_sorted;
		tdbb->bumpStats(RuntimeStatistics::SORTS);
		return;
	}

	// Write the last records as a run_control

	put_run(scb);

	CHECK_FILE(scb);

	// Merge runs of low depth to free memory part of temp space
	// they use and to make total runs count lower. This is fast
	// because low depth runs usually sit in memory
	ULONG run_count = 0, low_depth_cnt = 0;
	for (run = scb->scb_runs; run; run = run->run_next) {
		++run_count;
		if (run->run_depth < MAX_MERGE_LEVEL)
			low_depth_cnt++;
	}

	if (low_depth_cnt > 1 && low_depth_cnt < run_count)
	{
		merge_runs(scb, low_depth_cnt);
		CHECK_FILE(scb);
	}

	// Build a merge tree for the run_control blocks. Start by laying them all out
	// in a vector. This is done to allow us to build a merge tree from the
	// bottom up, ensuring that a balanced tree is built.

	// While counting the runs, free any per-run buffers left over from
	// earlier intermediate merges.
	for (run_count = 0, run = scb->scb_runs; run; run = run->run_next) {
		if (run->run_buff_alloc) {
			delete run->run_buffer;
			run->run_buff_alloc = false;
		}
		++run_count;
	}

	run_merge_hdr** streams =
		(run_merge_hdr**) scb->scb_pool->allocate(run_count * sizeof(run_merge_hdr*));

	run_merge_hdr** m1 = streams;
	for (run = scb->scb_runs; run; run = run->run_next)
		*m1++ = (run_merge_hdr*) run;
	ULONG count = run_count;

	// We're building a b-tree of the sort merge blocks, we have (count)
	// leaves already, so we *know* we need (count-1) merge blocks.

	if (count > 1) {
		fb_assert(!scb->scb_merge_pool);	// shouldn't have a pool
		try {
			scb->scb_merge_pool =
				(merge_control*) scb->scb_pool->allocate((count - 1) * sizeof(merge_control));
			merge_pool = scb->scb_merge_pool;
			memset(merge_pool, 0, (count - 1) * sizeof(merge_control));
		}
		catch (const Firebird::BadAlloc&) {
			// Release the streams vector before propagating, so the outer
			// handler doesn't leak it.
			delete streams;
			throw;
		}
	}
	else {
		// Merge of 1 or 0 runs doesn't make sense
		fb_assert(false);				// We really shouldn't get here
		merge = (merge_control*) * streams;	// But if we do...
	}

	// Each pass through the vector builds a level of the merge tree
	// by condensing two runs into one.
	// We will continue to make passes until there is a single item.
	//
	// See also kissing cousin of this loop in merge_runs()

	while (count > 1) {
		run_merge_hdr** m2 = m1 = streams;

		// "m1" is used to sequence through the runs being merged,
		// while "m2" points at the new merged run

		while (count >= 2) {
			merge = merge_pool++;
			merge->mrg_header.rmh_type = RMH_TYPE_MRG;

			fb_assert(((*m1)->rmh_type == RMH_TYPE_MRG) ||	// garbage watch
					  ((*m1)->rmh_type == RMH_TYPE_RUN));

			(*m1)->rmh_parent = merge;
			merge->mrg_stream_a = *m1++;

			fb_assert(((*m1)->rmh_type == RMH_TYPE_MRG) ||	// garbage watch
					  ((*m1)->rmh_type == RMH_TYPE_RUN));

			(*m1)->rmh_parent = merge;
			merge->mrg_stream_b = *m1++;

			merge->mrg_record_a = NULL;
			merge->mrg_record_b = NULL;

			*m2++ = (run_merge_hdr*) merge;
			count -= 2;
		}

		// An odd leftover stream is carried up to the next level unchanged.
		if (count)
			*m2++ = *m1++;
		count = m2 - streams;
	}

	delete streams;

	// NOTE(review): 'buffer' is only referenced by the commented-out
	// allocation scheme below; the active code no longer uses it.
	SORTP* buffer = (SORTP *) scb->scb_first_pointer;
	merge->mrg_header.rmh_parent = NULL;
	scb->scb_merge = merge;
	// Drop the back-pointer longword from the record size: merged output
	// records no longer carry the sr_bckptr prefix.
	scb->scb_longs -= SIZEOF_SR_BCKPTR_IN_LONGS;

	// Divvy up the sort space among buffers for runs. Although something slightly
	// better could be arranged, for now give them all the same size hunk.

/**
	ULONG size;
	const ULONG temp = DIFF_LONGS(scb->scb_end_memory, buffer);
	count = temp / (scb->scb_longs * run_count);
	if (count) {
		size = count * (SSHORT) scb->scb_longs;
		count = run_count;
	}
	else {
		size = (SSHORT) scb->scb_longs;
		count = temp / scb->scb_longs;
	}

	// Allocate buffer space for either all the runs, if they fit, or for
	// as many as allow

	for (run = scb->scb_runs; run && count; count--, run = run->run_next) {
		run->run_buffer = buffer;
		buffer += size;
		run->run_record =
			reinterpret_cast<sort_record*>(run->run_end_buffer = buffer);
		run->run_buff_cache = false;
	}

	// If there was not enough buffer space, get some more for the remaining runs
	// allocating enough for the merge space plus a link

	for (; run; run = run->run_next) {
		run->run_buffer =
			(ULONG*) scb->scb_pool->allocate(size * sizeof(ULONG));
		run->run_buff_alloc = true;
		run->run_record =
			reinterpret_cast<sort_record*>(run->run_end_buffer =
				run->run_buffer + size);
		run->run_buff_cache = false;
	}
**/

	// Allocate space for runs. The more memory we assign to each run the
	// faster we will read scratch file and return sorted records to caller.
	// At first try to reuse free memory from temp space. Note that temp space
	// itself allocated memory by at least TempSpace::getMinBlockSize chunks.
	// As we need contiguous memory don't ask for bigger parts
	ULONG allocSize = MAX_SORT_BUFFER_SIZE * RUN_GROUP;
	ULONG allocated = allocate_memory(scb, run_count, allocSize, true);

	if (allocated < run_count)
	{
		// Some runs still have no buffer; allocate from the pool, halving
		// the request on BadAlloc until it fits or nothing is left to try.
		const USHORT rec_size = scb->scb_longs << SHIFTLONG;
		allocSize = MAX_SORT_BUFFER_SIZE * RUN_GROUP;
		for (run = scb->scb_runs; run; run = run->run_next)
		{
			if (!run->run_buffer)
			{
				// Round the buffer size down to a whole number of records.
				int mem_size = MIN(allocSize / rec_size, run->run_records) * rec_size;
				char* mem = NULL;
				try {
					mem = (char*) scb->scb_pool->allocate(mem_size);
				}
				catch (const Firebird::BadAlloc&) {
					mem_size = (mem_size / (2 * rec_size)) * rec_size;
					if (!mem_size)
						throw;
					mem = (char*) scb->scb_pool->allocate(mem_size);
				}
				run->run_buff_alloc = true;
				run->run_buff_cache = false;

				// run_record starts at the buffer end: the merge fills the
				// buffer backwards from run_end_buffer.
				run->run_buffer = reinterpret_cast<SORTP*> (mem);
				mem += mem_size;
				run->run_record = reinterpret_cast<sort_record*>(mem);
				run->run_end_buffer = reinterpret_cast<SORTP*> (mem);
			}
		}
	}

	sort_runs_by_seek(scb, run_count);

	scb->scb_flags |= scb_sorted;
	tdbb->bumpStats(RuntimeStatistics::SORTS);

	}
	catch (const Firebird::BadAlloc&) {
		// Out of memory anywhere above is reported as a fatal sort error.
		error_memory(scb);
	}
}
|
|
|
|
|
|
|
|
|
2007-11-12 15:26:44 +01:00
|
|
|
FB_UINT64 SORT_write_block(ISC_STATUS* status_vector,
						   TempSpace* tmp_space,
						   FB_UINT64 seek,
						   BLOB_PTR* address,
						   ULONG length)
{
/**************************************
 *
 *      S O R T _ w r i t e _ b l o c k
 *
 **************************************
 *
 * Functional description
 *	Write a block of stuff to the scratch file.
 *
 **************************************/
	try
	{
		const size_t written = tmp_space->write(seek, address, length);
		fb_assert(written == length);
		// Advance past the data just written.
		seek += written;
	}
	catch (const Firebird::status_exception& ex)
	{
		// Copy the failure into the caller's status vector and punt.
		Firebird::stuff_exception(status_vector, ex);
		ERR_post(isc_sort_err, 0);
	}
	return seek;
}
|
|
|
|
|
|
|
|
|
|
|
|
#ifndef SCROLLABLE_CURSORS
|
2002-09-17 07:58:40 +02:00
|
|
|
#ifdef WORDS_BIGENDIAN
|
2008-01-16 10:48:41 +01:00
|
|
|
static void diddle_key(UCHAR* record, sort_context* scb, bool direction)
{
/**************************************
 *
 *      d i d d l e _ k e y             ( n o n - V A X )
 *
 **************************************
 *
 * Functional description
 *	Perform transformation between the natural form of a record
 *	and a form that can be sorted in unsigned comparison order.
 *
 *	direction - TRUE for SORT_put() and FALSE for SORT_get()
 *
 **************************************/
	UCHAR *fill_pos, fill_char;
	USHORT fill, flag;

	// Transform each key segment described in scb_description in place.
	for (sort_key_def* key = scb->scb_description, *end = key + scb->scb_keys;
		 key < end; key++)
	{
		UCHAR* p = record + key->skd_offset;
		USHORT n = key->skd_length;
		// Non-zero when the segment sorts descending; such segments get
		// their bytes complemented at the end of the loop body.
		USHORT complement = key->skd_flags & SKD_descending;

		switch (key->skd_dtype) {
		// Already unsigned byte-comparable on a big-endian machine.
		case SKD_ulong:
		case SKD_ushort:
		case SKD_bytes:
		case SKD_sql_time:
			break;

			// Stash embedded control info for non-fixed data types in the sort
			// record and zap it so that it doesn't interfere with collation

		case SKD_varying:
			if (direction)
			{
				USHORT& vlen = ((vary*) p)->vary_length;
				if (!(scb->scb_flags & scb_sorted))
				{
					// Save the length aside and pad the tail so that short
					// values compare correctly against longer ones.
					*((USHORT *) (record + key->skd_vary_offset)) = vlen;
					fill_char = (key->skd_flags & SKD_binary) ? 0 : ASCII_SPACE;
					fill_pos = p + sizeof(USHORT) + vlen;
					fill = n - sizeof(USHORT) - vlen;
					if (fill)
						memset(fill_pos, fill_char, fill);
				}
				// Zap the in-key length so it doesn't affect comparisons.
				vlen = 0;
			}
			break;

		case SKD_cstring:
			if (direction) {
				fill_char = (key->skd_flags & SKD_binary) ? 0 : ASCII_SPACE;
				if (!(scb->scb_flags & scb_sorted)) {
					const USHORT l = strlen(reinterpret_cast<char*>(p));
					*((USHORT *) (record + key->skd_vary_offset)) = l;
					fill_pos = p + l;
					fill = n - l;
					if (fill)
						memset(fill_pos, fill_char, fill);
				}
				else {
					// Length was stashed on a previous pass; just overwrite
					// the terminator with the pad character.
					USHORT l = *((USHORT *) (record + key->skd_vary_offset));
					*(p + l) = fill_char;
				}
			}
			break;

		case SKD_text:
			break;

		case SKD_d_float:
		case SKD_float:
		case SKD_double:
			// Flip the sign bit so positive floats order above negative
			// ones under unsigned compare; negative values additionally
			// get complemented via the 'complement' flag below.
			flag = (direction || !complement)
				? direction : TRUE;
			if (flag ^ (*p >> 7))
				*p ^= 1 << 7;
			else
				complement = !complement;
			break;

		case SKD_long:
		case SKD_short:
		case SKD_quad:
		case SKD_timestamp1:
		case SKD_timestamp2:
		case SKD_sql_date:
		case SKD_int64:
			// Signed integers: toggling the sign bit biases them into
			// unsigned order.
			*p ^= 1 << 7;
			break;

		default:
			fb_assert(false);
			break;
		}
		// Descending segments: complement every byte of the segment.
		if (complement && n)
			do
				*p++ ^= -1;
			while (--n);

		// Flatter but don't complement control info for non-fixed
		// data types when restoring the data

		if (key->skd_dtype == SKD_varying && !direction) {
			p = record + key->skd_offset;
			((vary*) p)->vary_length = *((USHORT *) (record + key->skd_vary_offset));
		}

		if (key->skd_dtype == SKD_cstring && !direction) {
			p = record + key->skd_offset;
			USHORT l = *((USHORT *) (record + key->skd_vary_offset));
			*(p + l) = 0;
		}
	}
}
|
|
|
|
|
|
|
|
|
|
|
|
#else
|
2008-01-16 10:48:41 +01:00
|
|
|
static void diddle_key(UCHAR* record, sort_context* scb, bool direction)
{
/**************************************
 *
 *      d i d d l e _ k e y             ( V A X )
 *
 **************************************
 *
 * Functional description
 *	Perform transformation between the natural form of a record
 *	and a form that can be sorted in unsigned comparison order.
 *
 *	direction - TRUE for SORT_put() and FALSE for SORT_get()
 *
 **************************************/
	UCHAR c1, fill_char, *fill_pos;
	USHORT fill;
	SSHORT longs, flag;
	ULONG lw;
#ifndef IEEE
	USHORT w;
#endif

	// Transform each key segment described in scb_description in place.
	// Little-endian variant: multi-byte values must also be byte-swapped
	// so the key can be compared as a sequence of unsigned longwords.
	for (sort_key_def* key = scb->scb_description, *end = key + scb->scb_keys;
		 key < end; key++)
	{
		BLOB_PTR* p = (BLOB_PTR *) record + key->skd_offset;
		USHORT* wp = (USHORT *) p;
		SORTP* lwp = (SORTP *) p;
		// Non-zero when the segment sorts descending; such segments get
		// their bytes complemented at the end of the loop body.
		USHORT complement = key->skd_flags & SKD_descending;
		USHORT n = ROUNDUP(key->skd_length, sizeof(SLONG));

		switch (key->skd_dtype) {
		case SKD_timestamp1:
		case SKD_timestamp2:
		case SKD_sql_time:
		case SKD_sql_date:
			// Toggle the sign bit (byte 3 on a little-endian longword).
			p[3] ^= 1 << 7;
			break;

		case SKD_ulong:
		case SKD_ushort:
			break;

		case SKD_text:
		case SKD_bytes:
		case SKD_cstring:
		case SKD_varying:

			// Stash embedded control info for non-fixed data types in the sort
			// record and zap it so that it doesn't interfere with collation

			if (key->skd_dtype == SKD_varying && direction) {
				USHORT& vlen = ((vary*) p)->vary_length;
				if (!(scb->scb_flags & scb_sorted)) {
					// Save the length aside and pad the tail so that short
					// values compare correctly against longer ones.
					*((USHORT *) (record + key->skd_vary_offset)) = vlen;
					fill_char =
						(key->skd_flags & SKD_binary) ? 0 : ASCII_SPACE;
					fill_pos = p + sizeof(USHORT) + vlen;
					fill = n - sizeof(USHORT) - vlen;
					if (fill)
						memset(fill_pos, fill_char, fill);
				}
				// Zap the in-key length so it doesn't affect comparisons.
				vlen = 0;
			}

			if (key->skd_dtype == SKD_cstring && direction) {
				fill_char = (key->skd_flags & SKD_binary) ? 0 : ASCII_SPACE;
				if (!(scb->scb_flags & scb_sorted)) {
					const USHORT l = strlen(reinterpret_cast<char*>(p));
					*((USHORT *) (record + key->skd_vary_offset)) = l;
					fill_pos = p + l;
					fill = n - l;
					if (fill)
						memset(fill_pos, fill_char, fill);
				}
				else {
					// Length was stashed on a previous pass; just overwrite
					// the terminator with the pad character.
					USHORT l = *((USHORT *) (record + key->skd_vary_offset));
					*(p + l) = fill_char;
				}
			}

			// Reverse the bytes within each longword (0123 -> 3210) so the
			// text collates correctly under longword compares. The swap is
			// its own inverse, so the same code restores the data.
			longs = n >> SHIFTLONG;
			while (--longs >= 0) {
				c1 = p[3];
				p[3] = *p;
				*p++ = c1;
				c1 = p[1];
				p[1] = *p;
				*p = c1;
				p += 3;
			}
			p = (BLOB_PTR *) wp;
			break;

		case SKD_short:
			p[1] ^= 1 << 7;
			break;

		case SKD_long:
			p[3] ^= 1 << 7;
			break;

		case SKD_quad:
			p[7] ^= 1 << 7;
			break;

		case SKD_int64:
			// INT64's fit in TWO LONGS, and hence the SWAP has to happen
			// here for the right order comparison using DO_32_COMPARE
			if (!direction)
				SWAP_LONGS(lwp[0], lwp[1], lw);

			p[7] ^= 1 << 7;

			if (direction)
				SWAP_LONGS(lwp[0], lwp[1], lw);
			break;

#ifdef IEEE
		case SKD_double:
			// Swap the two longwords around the sign-bit flip; the swap on
			// the way in is undone by the swap on the way out.
			if (!direction) {
				lw = lwp[0];
				lwp[0] = lwp[1];
				lwp[1] = lw;
			}
			flag = (direction || !complement) ? direction : TRUE;
			if (flag ^ (p[7] >> 7))
				p[7] ^= 1 << 7;
			else
				complement = !complement;
			if (direction) {
				lw = lwp[0];
				lwp[0] = lwp[1];
				lwp[1] = lw;
			}
			break;

		case SKD_float:
			flag = (direction || !complement) ? direction : TRUE;
			if (flag ^ (p[3] >> 7))
				p[3] ^= 1 << 7;
			else
				complement = !complement;
			break;

#else // IEEE
		case SKD_double:
			w = wp[2];
			wp[2] = wp[3];
			wp[3] = w;
			// fall through: the low words are handled with SKD_float below

		case SKD_d_float:
		case SKD_float:
			if (!direction)
			{
				if (complement)
				{
					if (p[3] & 1 << 7)
						complement = !complement;
					else
						p[3] ^= 1 << 7;
				}
				else
				{
					if (p[3] & 1 << 7)
						p[3] ^= 1 << 7;
					else
						complement = !complement;
				}
			}
			w = wp[0];
			wp[0] = wp[1];
			wp[1] = w;
			if (direction)
			{
				if (p[3] & 1 << 7)
					complement = !complement;
				else
					p[3] ^= 1 << 7;
			}
			break;
#endif // IEEE

		default:
			fb_assert(false);
			break;
		}
		// Descending segments: complement every byte of the segment.
		if (complement && n)
			do {
				*p++ ^= -1;
			} while (--n);

		// Flatter but don't complement control info for non-fixed
		// data types when restoring the data

		if (key->skd_dtype == SKD_varying && !direction) {
			p = (BLOB_PTR *) record + key->skd_offset;
			((vary*) p)->vary_length = *((USHORT*) (record + key->skd_vary_offset));
		}

		if (key->skd_dtype == SKD_cstring && !direction) {
			p = (BLOB_PTR *) record + key->skd_offset;
			USHORT l = *((USHORT *) (record + key->skd_vary_offset));
			*(p + l) = 0;
		}
	}
}
|
|
|
|
#endif
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
2004-03-19 07:14:53 +01:00
|
|
|
static void error_memory(sort_context* scb)
{
/**************************************
 *
 *      e r r o r _ m e m o r y
 *
 **************************************
 *
 * Functional description
 *	Report fatal out of memory error.
 *
 **************************************/
	ISC_STATUS* const status = scb->scb_status_vector;
	fb_assert(status);

	// Fill the status vector with an out-of-memory sort error and punt.
	status[0] = isc_arg_gds;
	status[1] = isc_sort_mem_err;
	status[2] = isc_arg_end;

	ERR_punt();
}
|
|
|
|
|
|
|
|
|
2007-11-12 15:26:44 +01:00
|
|
|
static inline FB_UINT64 find_file_space(sort_context* scb, ULONG size)
{
/**************************************
 *
 *      f i n d _ f i l e _ s p a c e
 *
 **************************************
 *
 * Functional description
 *      Find space of input size in one of the
 *      open sort files. If a free block is not
 *      available, allocate space at the end.
 *
 **************************************/
	// The temp-space manager owns the free list: it reuses a released
	// segment when one fits, otherwise it grows the scratch file.
	TempSpace* const space = scb->scb_space;
	return space->allocateSpace(size);
}
|
|
|
|
|
|
|
|
|
2007-11-12 15:26:44 +01:00
|
|
|
static inline void free_file_space(sort_context* scb, FB_UINT64 position, ULONG size)
{
/**************************************
 *
 *      f r e e _ f i l e _ s p a c e
 *
 **************************************
 *
 * Functional description
 *      Release a segment of work file.
 *
 **************************************/
	try {
		scb->scb_space->releaseSpace(position, size);
	}
	catch (const Firebird::BadAlloc&) {
		// Releasing space can itself allocate (free-list bookkeeping);
		// treat a failure there as a fatal sort memory error.
		error_memory(scb);
	}
}
|
|
|
|
|
|
|
|
|
2004-03-28 11:10:30 +02:00
|
|
|
static sort_record* get_merge(merge_control* merge, sort_context* scb
#ifdef SCROLLABLE_CURSORS
							  , RSE_GET_MODE mode
#endif
	)
{
/**************************************
 *
 *      g e t _ m e r g e
 *
 **************************************
 *
 * Functional description
 *      Get next record from a merge tree and/or run_control.
 *
 *      The merge tree is walked iteratively: run nodes (leaves) hand a
 *      record up to their parent merge node; merge nodes compare the
 *      candidates from their two streams and pass the winner upward.
 *      Returns NULL at end-of-stream.
 *
 **************************************/
	SORTP *p;				// no more than 1 SORTP* to a line
	SORTP *q;				// no more than 1 SORTP* to a line
	ULONG l;
#ifdef SCROLLABLE_CURSORS
	ULONG space_available, data_remaining;
#else
	ULONG n;
#endif

	sort_record* record = NULL;
	bool eof = false;

	while (merge) {
		// If node is a run_control, get the next record (or not) and back to parent

		if (merge->mrg_header.rmh_type == RMH_TYPE_RUN) {
			run_control* run = (run_control*) merge;
			merge = run->run_header.rmh_parent;

			// check for end-of-file condition in either direction

#ifdef SCROLLABLE_CURSORS
			if (
				(mode == RSE_get_backward
				 && run->run_records >= run->run_max_records - 1)
				|| (mode == RSE_get_forward && run->run_records == 0))
#else
			if (run->run_records == 0)
#endif
			{
				// -1 is an in-band sentinel: "this run is exhausted";
				// the parent distinguishes it from a real record via 'eof'
				record = (sort_record*) - 1;
				eof = true;
				continue;
			}

			eof = false;

			// Find the appropriate record in the buffer to return

#ifdef SCROLLABLE_CURSORS
			if (mode == RSE_get_forward) {
				run->run_record =
					reinterpret_cast<sort_record*>(NEXT_RUN_RECORD(run->run_record));
#endif
				if ((record = (sort_record*) run->run_record) <
					(sort_record*) run->run_end_buffer)
				{
#ifndef SCROLLABLE_CURSORS
					run->run_record =
						reinterpret_cast<sort_record*>(NEXT_RUN_RECORD(run->run_record));
#endif
					--run->run_records;
					continue;
				}
#ifndef SCROLLABLE_CURSORS
				// There are records remaining, but the buffer is full.
				// Read a buffer full.

				l =
					(ULONG) ((BLOB_PTR *) run->run_end_buffer -
							 (BLOB_PTR *) run->run_buffer);
				n = run->run_records * scb->scb_longs * sizeof(ULONG);
				l = MIN(l, n);
				run->run_seek =
					SORT_read_block(scb->scb_status_vector, scb->scb_space,
									run->run_seek, (UCHAR*) run->run_buffer, l);
#else
			}
			else {
				run->run_record =
					reinterpret_cast<sort_record*>(PREV_RUN_RECORD(run->run_record));
				if ((record = (sort_record*) run->run_record) >=
					reinterpret_cast<sort_record*>(run->run_buffer))
				{
					++run->run_records;
					continue;
				}
			}

			// There are records remaining, but we have stepped over the
			// edge of the cache. Read the next buffer full of records.

			fb_assert((BLOB_PTR *) run->run_end_buffer >
					  (BLOB_PTR *) run->run_buffer);

			space_available =
				(ULONG) ((BLOB_PTR *) run->run_end_buffer -
						 (BLOB_PTR *) run->run_buffer);
			if (mode == RSE_get_forward)
				data_remaining =
					run->run_records * scb->scb_longs * sizeof(ULONG);
			else
				data_remaining =
					(run->run_max_records -
					 run->run_records) * scb->scb_longs * sizeof(ULONG);
			l = MIN(space_available, data_remaining);

			if (mode == RSE_get_forward)
				run->run_seek += run->run_cached;
			else
				run->run_seek -= l;

			SORT_read_block(scb->scb_status_vector, scb->scb_space,
							run->run_seek, (UCHAR*) run->run_buffer, l);
			run->run_cached = l;

			if (mode == RSE_get_forward) {
#endif
				record = reinterpret_cast<sort_record*>(run->run_buffer);
#ifndef SCROLLABLE_CURSORS
				run->run_record =
					reinterpret_cast<sort_record*>(NEXT_RUN_RECORD(record));
#endif
				--run->run_records;
#ifdef SCROLLABLE_CURSORS
			}
			else {
				record = reinterpret_cast<sort_record*>(PREV_RUN_RECORD(run->run_end_buffer));
				++run->run_records;
			}

			run->run_record = (sort_record*) record;
#endif
			continue;
		}

		// If've we got a record, somebody asked for it. Find out who.

		if (record)
			if (merge->mrg_stream_a && !merge->mrg_record_a)
				if (eof)
					merge->mrg_stream_a = NULL;
				else
					merge->mrg_record_a = record;
			else if (eof)
				merge->mrg_stream_b = NULL;
			else
				merge->mrg_record_b = record;

		// If either streams need a record and is still active, loop back to pick
		// up the record. If either stream is dry, return the record of the other.
		// If both are dry, indicate eof for this stream.

		record = NULL;
		eof = false;

		if (!merge->mrg_record_a && merge->mrg_stream_a) {
			merge = (merge_control*) merge->mrg_stream_a;
			continue;
		}

		if (!merge->mrg_record_b)
		{
			if (merge->mrg_stream_b) {
				merge = (merge_control*) merge->mrg_stream_b;
			}
			else if ( (record = merge->mrg_record_a) ) {
				merge->mrg_record_a = NULL;
				merge = merge->mrg_header.rmh_parent;
			}
			else {
				// both sub-streams are dry: propagate eof upward
				eof = true;
				record = (sort_record*) - 1;
				merge = merge->mrg_header.rmh_parent;
			}
			continue;
		}

		if (!merge->mrg_record_a) {
			record = merge->mrg_record_b;
			merge->mrg_record_b = NULL;
			merge = merge->mrg_header.rmh_parent;
			continue;
		}

		// We have prospective records from each of the sub-streams. Compare them.
		// If equal, offer each to user routine for possible sacrifice.

		p = merge->mrg_record_a->sort_record_key;
		q = merge->mrg_record_b->sort_record_key;
		//l = scb->scb_key_length;
		l = scb->scb_unique_length;

		DO_32_COMPARE(p, q, l);

		if (l == 0 && scb->scb_dup_callback) {
			// Keys compare equal on the unique part: un-diddle both records
			// before showing them to the duplicate callback.
#ifdef SCROLLABLE_CURSORS
			SORT_diddle_key((UCHAR*) merge->mrg_record_a, scb, false);
			SORT_diddle_key((UCHAR*) merge->mrg_record_b, scb, false);
#else
			diddle_key((UCHAR*) merge->mrg_record_a, scb, false);
			diddle_key((UCHAR*) merge->mrg_record_b, scb, false);
#endif
			if ((*scb->scb_dup_callback) ((const UCHAR*) merge->mrg_record_a,
										  (const UCHAR*) merge->mrg_record_b,
										  scb->scb_dup_callback_arg))
			{
				// Callback rejected the duplicate: drop record A, keep B.
				merge->mrg_record_a = NULL;
#ifdef SCROLLABLE_CURSORS
				SORT_diddle_key((UCHAR*) merge->mrg_record_b, scb, true);
#else
				diddle_key((UCHAR*) merge->mrg_record_b, scb, true);
#endif
				continue;
			}
#ifdef SCROLLABLE_CURSORS
			SORT_diddle_key((UCHAR*) merge->mrg_record_a, scb, true);
			SORT_diddle_key((UCHAR*) merge->mrg_record_b, scb, true);
#else
			diddle_key((UCHAR*) merge->mrg_record_a, scb, true);
			diddle_key((UCHAR*) merge->mrg_record_b, scb, true);
#endif
		}

		if (l == 0) {
			// Unique parts tie: break the tie on the remainder of the key.
			l = scb->scb_key_length - scb->scb_unique_length;
			if (l != 0)
				DO_32_COMPARE(p, q, l);
		}

		// NOTE(review): p/q appear to have been advanced by DO_32_COMPARE,
		// so [-1] looks like the deciding longword — confirm against the
		// macro's definition in sort.h.
#ifdef SCROLLABLE_CURSORS
		if (mode == RSE_get_forward && p[-1] < q[-1])
#else
		if (p[-1] < q[-1])
#endif
		{
			record = merge->mrg_record_a;
			merge->mrg_record_a = NULL;
		}
		else {
			record = merge->mrg_record_b;
			merge->mrg_record_b = NULL;
		}

		merge = merge->mrg_header.rmh_parent;
	}

	// Merge pointer is null; we're done. Return either the most
	// recent record, or end of file, as appropriate.

	return (eof) ? NULL : record;
}
|
|
|
|
|
|
|
|
|
2004-03-19 07:14:53 +01:00
|
|
|
static void init(sort_context* scb)
{
/**************************************
 *
 *       i n i t
 *
 **************************************
 *
 * Functional description
 *      Initialize the sort control block for a quick sort.
 *
 **************************************/

	// If we have run of MAX_MERGE_LEVEL then we have a relatively big sort.
	// Grow sort buffer space to make count of final runs lower and to
	// read\write scratch file by bigger chunks
	// At this point we already allocated some memory for temp space so
	// growing sort buffer space is not a big compared to that

	if (scb->scb_size_memory <= MAX_SORT_BUFFER_SIZE && scb->scb_runs &&
		scb->scb_runs->run_depth == MAX_MERGE_LEVEL)
	{
		void* mem = NULL;
		const ULONG mem_size = MAX_SORT_BUFFER_SIZE * RUN_GROUP;
		try {
			mem = scb->scb_pool->allocate(mem_size);
		}
		catch (const Firebird::BadAlloc&) {
			// do nothing - growing the buffer is an optimization only;
			// on failure we simply keep the existing (smaller) buffer
		}

		if (mem)
		{
			// Swap the old sort buffer for the larger one and recompute
			// the derived pointers into it.
			scb->scb_pool->deallocate(scb->scb_memory);

			scb->scb_memory = (SORTP *) mem;
			scb->scb_size_memory = mem_size;

			scb->scb_end_memory =
				(SORTP *) ((BLOB_PTR *) scb->scb_memory + scb->scb_size_memory);
			scb->scb_first_pointer = (sort_record**) scb->scb_memory;

			// The bigger buffer effectively removes one merge level.
			for (run_control *run = scb->scb_runs; run; run = run->run_next)
				run->run_depth--;
		}
	}

	// Reset pointer/record cursors: pointers grow up from the bottom of
	// the buffer, records grow down from the top.
	scb->scb_next_pointer = scb->scb_first_pointer;
	scb->scb_last_record = (SR *) scb->scb_end_memory;

	// Plant the low-key sentinel as the first pointer.
	*scb->scb_next_pointer++ = reinterpret_cast<sort_record*>(low_key);
}
|
|
|
|
|
|
|
|
|
2004-03-19 07:14:53 +01:00
|
|
|
static bool local_fini(sort_context* scb, Attachment* att)
{
/**************************************
 *
 *      l o c a l _ f i n i
 *
 **************************************
 *
 * Functional description
 *      Finish sort, and release all resources.
 *
 *      Returns false (and frees nothing) if the sort block is not found
 *      on the attachment's active-sort list; true after a full teardown.
 *
 **************************************/
	bool found_it = true;

	if (att) {

		// Cover case where a posted error caused reuse by another thread

		if (scb->scb_attachment != att)
			att = scb->scb_attachment;
		found_it = false;
	}

	// Start by unlinking from que, if present

	if (att)
	{
		for (sort_context** ptr = &att->att_active_sorts; *ptr; ptr = &(*ptr)->scb_next)
		{
			if (*ptr == scb) {
				*ptr = scb->scb_next;
				found_it = true;
				break;
			}
		}
	}

	// *NO*. I won't free it if it's not in
	// the pointer list that has been passed
	// to me. THIS MEANS MEMORY LEAK. -- mrs

	if (!found_it)
		return false;

	// Loop through the sfb list and close work files

	delete scb->scb_space;

	// Get rid of extra merge space
	// (the merge buffers form a singly linked list whose first word
	// points at the next buffer)

	ULONG** merge_buf;
	while ( (merge_buf = (ULONG **) scb->scb_merge_space) ) {
		scb->scb_merge_space = *merge_buf;
		delete merge_buf;
	}

	// If runs are allocated and not in the big block, release them.
	// Then release the big block.

	delete scb->scb_memory;
	scb->scb_memory = NULL;

	// Clean up the runs that were used

	run_control* run;
	while ( (run = scb->scb_runs) ) {
		scb->scb_runs = run->run_next;
		if (run->run_buff_alloc)
			delete run->run_buffer;
		delete run;
	}

	// Clean up the free runs also

	while ( (run = scb->scb_free_runs) ) {
		scb->scb_free_runs = run->run_next;
		if (run->run_buff_alloc)
			delete run->run_buffer;
		delete run;
	}

	delete scb->scb_merge_pool;
	scb->scb_merge_pool = NULL;

	// Clear remaining links so a stale scb cannot be mistaken for live.
	scb->scb_merge = NULL;
	scb->scb_attachment = NULL;
	scb->scb_impure = NULL;
	scb->scb_next = NULL;

	return true;
}
|
|
|
|
|
2007-04-05 11:13:10 +02:00
|
|
|
|
2007-04-03 17:01:37 +02:00
|
|
|
#ifdef DEV_BUILD
|
2007-04-05 11:13:10 +02:00
|
|
|
static void check_file(const sort_context* scb, const run_control* temp_run)
|
2007-04-03 17:01:37 +02:00
|
|
|
{
|
|
|
|
/**************************************
|
|
|
|
*
|
|
|
|
* c h e c k _ f i l e
|
|
|
|
*
|
|
|
|
**************************************
|
|
|
|
*
|
|
|
|
* Functional description
|
|
|
|
* Validate memory and file space allocation
|
|
|
|
*
|
|
|
|
**************************************/
|
2007-11-12 15:26:44 +01:00
|
|
|
FB_UINT64 runs = temp_run ? temp_run->run_size : 0;
|
2007-08-10 15:05:27 +02:00
|
|
|
offset_t free = 0;
|
2007-11-12 15:26:44 +01:00
|
|
|
FB_UINT64 run_mem = 0;
|
2007-04-03 17:01:37 +02:00
|
|
|
|
|
|
|
bool ok = scb->scb_space->validate(free);
|
|
|
|
fb_assert(ok);
|
|
|
|
|
2007-04-05 11:13:10 +02:00
|
|
|
for (const run_control* run = scb->scb_runs; run; run = run->run_next)
|
2007-04-03 17:01:37 +02:00
|
|
|
{
|
|
|
|
runs += run->run_size;
|
|
|
|
run_mem += run->run_mem_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
ok = (runs + run_mem + free) == scb->scb_space->getSize();
|
|
|
|
fb_assert(ok);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
|
|
static ULONG allocate_memory(sort_context* scb, ULONG n, ULONG chunkSize, bool useFreeSpace)
{
/**************************************
 *
 *      a l l o c a t e _ m e m o r y
 *
 **************************************
 *
 * Functional description
 *      Allocate memory for first n runs
 *
 *      Returns the number of runs that received a buffer. A run whose
 *      data is already in the temp-space memory cache is pointed straight
 *      at that cache; otherwise, free cache segments are handed out in
 *      batch (only when useFreeSpace is set).
 *
 **************************************/
	const USHORT rec_size = scb->scb_longs << SHIFTLONG;
	ULONG allocated = 0, count;
	run_control* run;
	TempSpace* tempSpace = scb->scb_space;

	// if some run's already in memory cache - use this memory
	for (run = scb->scb_runs, count = 0; count < n; run = run->run_next, count++)
	{
		run->run_buffer = NULL;

		// (was "if (mem = ...)": assignment hoisted out of the condition)
		char* mem = tempSpace->inMemory(run->run_seek, run->run_size);
		if (mem)
		{
			// The whole run already sits in cache: use it in place and
			// bump run_seek as if the data had been read from disk.
			run->run_buffer = reinterpret_cast<SORTP*>(mem);
			run->run_record = reinterpret_cast<sort_record*>(mem);
			mem += run->run_size;
			run->run_end_buffer = reinterpret_cast<SORTP*>(mem);
			run->run_seek += run->run_size; // emulate read
			allocated++;
		}
		run->run_buff_cache = (mem != NULL);
	}

	if (allocated == n || !useFreeSpace)
		return allocated;

	// try to use free blocks from memory cache of work file

	fb_assert(n > allocated);
	TempSpace::Segments segments(*scb->scb_pool, n - allocated);
	allocated += tempSpace->allocateBatch(n - allocated,
		MAX_SORT_BUFFER_SIZE, chunkSize, segments);

	if (segments.getCount())
	{
		// Hand the batch-allocated segments to the runs that still lack
		// a buffer, in list order, until segments run out.
		TempSpace::SegmentInMemory *seg = segments.begin(), *lastSeg = segments.end();
		for (run = scb->scb_runs, count = 0; count < n; run = run->run_next, count++)
		{
			if (!run->run_buffer)
			{
				// Trim to a whole number of records, never more than the
				// run actually contains.
				const size_t runSize = MIN(seg->size / rec_size, run->run_records) * rec_size;
				char* mem = seg->memory;

				run->run_mem_seek = seg->position;
				run->run_mem_size = seg->size;
				run->run_buffer = reinterpret_cast<SORTP*>(mem);
				mem += runSize;
				// record == end_buffer marks an empty buffer, forcing a
				// read on the first fetch from this run.
				run->run_record = reinterpret_cast<sort_record*>(mem);
				run->run_end_buffer = reinterpret_cast<SORTP*>(mem);

				seg++;
				if (seg == lastSeg)
					break;
			}
		}
	}

	return allocated;
}
|
2001-05-23 15:26:42 +02:00
|
|
|
|
2007-04-05 11:13:10 +02:00
|
|
|
|
2004-03-19 07:14:53 +01:00
|
|
|
static void merge_runs(sort_context* scb, USHORT n)
{
/**************************************
 *
 *      m e r g e _ r u n s
 *
 **************************************
 *
 * Functional description
 *      Merge the first n runs hanging off the sort control block, pushing
 *      the resulting run back onto the sort control block.
 *
 **************************************/

	// the only place we call merge_runs with n != RUN_GROUP is SORT_sort
	// and there n < RUN_GROUP * MAX_MERGE_LEVEL
	merge_control blks[RUN_GROUP * MAX_MERGE_LEVEL];

	fb_assert((n - 1) <= FB_NELEM(blks));	// stack var big enough?

	// Work with keys only (strip the back-pointer longwords) until the
	// merged run has been written; restored at the bottom.
	scb->scb_longs -= SIZEOF_SR_BCKPTR_IN_LONGS;

	// Make a pass thru the runs allocating buffer space, computing work file
	// space requirements, and filling in a vector of streams with run pointers

	const USHORT rec_size = scb->scb_longs << SHIFTLONG;
	//const USHORT buffers = scb->scb_size_memory / rec_size;
	//ULONG size = rec_size * (buffers / (USHORT) (2 * n));
	BLOB_PTR* buffer = (BLOB_PTR *) scb->scb_first_pointer;
	run_control temp_run;
	memset(&temp_run, 0, sizeof(run_control));

	temp_run.run_end_buffer =
		(SORTP *) (buffer + (scb->scb_size_memory / rec_size) * rec_size);
	temp_run.run_size = 0;
	temp_run.run_buff_alloc = false;

	run_merge_hdr* streams[RUN_GROUP * MAX_MERGE_LEVEL];
	run_merge_hdr** m1 = streams;

	sort_runs_by_seek(scb, n);

	// get memory for run's
	run_control* run = scb->scb_runs;

	CHECK_FILE(scb);
	USHORT allocated =
		allocate_memory(scb, n, MAX_SORT_BUFFER_SIZE, (run->run_depth > 0));
	CHECK_FILE(scb);

	// Divide what is left of the big sort buffer among the runs that did
	// not get a cache buffer from allocate_memory().
	const USHORT buffers = scb->scb_size_memory / rec_size;
	USHORT count;
	ULONG size = 0;
	if (n > allocated) {
		size = rec_size * (buffers / (USHORT) (2 * (n - allocated)));
	}
	for (run = scb->scb_runs, count = 0; count < n;
		 run = run->run_next, count++)
	{
		*m1++ = (run_merge_hdr*) run;

		// size = 0 indicates the record is too big to divvy up the
		// big sort buffer, so separate buffers must be allocated

		if (!run->run_buffer)
		{
			if (!size) {
				if (!run->run_buff_alloc) {
					run->run_buffer =
						(ULONG*) scb->scb_pool->allocate(rec_size * 2);
					run->run_buff_alloc = true;
				}
				run->run_end_buffer =
					reinterpret_cast<ULONG*>((BLOB_PTR *) run->run_buffer + (rec_size * 2));
				run->run_record =
					reinterpret_cast<sort_record*>(run->run_end_buffer);
			}
			else {
				run->run_buffer = (ULONG *) buffer;
				buffer += size;
				run->run_record =
					reinterpret_cast<sort_record*>(run->run_end_buffer =
												   (ULONG *) buffer);
			}
		}
		temp_run.run_size += run->run_size;
	}
	// Whatever is left of the big buffer becomes the output buffer.
	temp_run.run_record = reinterpret_cast<sort_record*>(buffer);
	temp_run.run_buffer = reinterpret_cast<ULONG*>(temp_run.run_record);
	temp_run.run_buff_cache = false;

	// Build merge tree bottom up.
	//
	// See also kissing cousin of this loop in SORT_sort()

	merge_control* merge;
	for (count = n, merge = blks; count > 1;) {
		run_merge_hdr** m2 = m1 = streams;
		while (count >= 2) {
			merge->mrg_header.rmh_type = RMH_TYPE_MRG;

			fb_assert(((*m1)->rmh_type == RMH_TYPE_MRG) ||	// garbage watch
					  ((*m1)->rmh_type == RMH_TYPE_RUN));

			(*m1)->rmh_parent = merge;
			merge->mrg_stream_a = *m1++;

			fb_assert(((*m1)->rmh_type == RMH_TYPE_MRG) ||	// garbage watch
					  ((*m1)->rmh_type == RMH_TYPE_RUN));

			(*m1)->rmh_parent = merge;
			merge->mrg_stream_b = *m1++;

			merge->mrg_record_a = NULL;
			merge->mrg_record_b = NULL;
			*m2++ = (run_merge_hdr*) merge;
			merge++;
			count -= 2;
		}
		// An odd stream is carried forward to the next tree level as-is.
		if (count)
			*m2++ = *m1++;
		count = m2 - streams;
	}

	--merge;
	merge->mrg_header.rmh_parent = NULL;

	// Merge records into run
	CHECK_FILE(scb);

	sort_record* q = reinterpret_cast<sort_record*>(temp_run.run_buffer);
	FB_UINT64 seek = temp_run.run_seek = find_file_space(scb, temp_run.run_size);
	temp_run.run_records = 0;

	CHECK_FILE2(scb, &temp_run);

	// Pull records off the merge tree, copying each into the output
	// buffer and flushing the buffer to the work file as it fills.
	const sort_record* p;
#ifdef SCROLLABLE_CURSORS
	while (p = get_merge(merge, scb, RSE_get_forward))
#else
	while ( (p = get_merge(merge, scb)) )
#endif
	{
		if (q >= (sort_record*) temp_run.run_end_buffer) {
			size = (BLOB_PTR *) q - (BLOB_PTR *) temp_run.run_buffer;
			seek = SORT_write_block(scb->scb_status_vector, scb->scb_space,
									seek, (UCHAR*) temp_run.run_buffer, size);
			q = reinterpret_cast<sort_record*>(temp_run.run_buffer);
		}
		count = scb->scb_longs;
		do {
			*q++ = *p++;
		} while (--count);
		++temp_run.run_records;
	}
#ifdef SCROLLABLE_CURSORS
	temp_run.run_max_records = temp_run.run_records;
#endif

	// Write the tail of the new run and return any unused space

	if ( (size = (BLOB_PTR *) q - (BLOB_PTR *) temp_run.run_buffer) )
		seek = SORT_write_block(scb->scb_status_vector, scb->scb_space,
								seek, (UCHAR*) temp_run.run_buffer, size);

	// If the records did not fill the allocated run (such as when duplicates are
	// rejected), then free the remainder and diminish the size of the run accordingly

	if (seek - temp_run.run_seek < temp_run.run_size) {
		free_file_space(scb, seek, temp_run.run_seek + temp_run.run_size - seek);
		temp_run.run_size = seek - temp_run.run_seek;
	}

	// Make a final pass thru the runs releasing space, blocks, etc.

	for (count = 0; count < n; count++) {
		// Remove run from list of in-use run blocks
		run = scb->scb_runs;
		scb->scb_runs = run->run_next;
#ifdef SCROLLABLE_CURSORS
		seek = run->run_seek + run->run_cached - run->run_size;
#else
		seek = run->run_seek - run->run_size;
#endif
		// Free the sort file space associated with the run

		free_file_space(scb, seek, run->run_size);

		if (run->run_mem_size)
		{
			free_file_space(scb, run->run_mem_seek, run->run_mem_size);
			run->run_mem_seek = run->run_mem_size = 0;
		}

		run->run_buff_cache = false;
		run->run_buffer = NULL;

		// Add run descriptor to list of unused run descriptor blocks

		run->run_next = scb->scb_free_runs;
		scb->scb_free_runs = run;
	}

	// Recycle the last freed descriptor to represent the merged run.
	scb->scb_free_runs = run->run_next;
	if (run->run_buff_alloc) {
		delete run->run_buffer;
		run->run_buff_alloc = false;
	}
	temp_run.run_header.rmh_type = RMH_TYPE_RUN;
	temp_run.run_depth = run->run_depth;
	temp_run.run_buff_cache = false;
	temp_run.run_buffer = NULL;
	*run = temp_run;
	++run->run_depth;
	run->run_next = scb->scb_runs;
	scb->scb_runs = run;
	scb->scb_longs += SIZEOF_SR_BCKPTR_IN_LONGS;

	CHECK_FILE(scb);
}
|
|
|
|
|
|
|
|
|
2007-04-05 11:13:10 +02:00
|
|
|
// Exchange two record pointers in the pointer array, keeping each record's
// embedded back pointer in sync with the slot that now references it.
inline void swap(SORTP** a, SORTP** b)
{
	// Re-point each record's back pointer at its new pointer slot first;
	// the two stores are independent of each other.
	((SORTP***) (*b))[BACK_OFFSET] = a;
	((SORTP***) (*a))[BACK_OFFSET] = b;

	// Now swap the slots themselves
	SORTP* const hold = *b;
	*b = *a;
	*a = hold;
}
|
2007-04-03 17:01:37 +02:00
|
|
|
|
|
|
|
|
2004-03-28 11:10:30 +02:00
|
|
|
static void quick(SLONG size, SORTP** pointers, ULONG length)
{
/**************************************
 *
 *	q u i c k
 *
 **************************************
 *
 * Functional description
 *	Sort an array of record pointers.  The routine assumes the
 *	following:
 *
 *	a.  Each element in the array points to the key of a record.
 *
 *	b.  Keys can be compared by auto-incrementing unsigned longword
 *	    compares.
 *
 *	c.  Relative array positions "-1" and "size" point to guard records
 *	    containing the least and the greatest possible sort keys.
 *
 *	***************************************************************
 *	* Boy, did the assumption below turn out to be pretty stupid! *
 *	***************************************************************
 *
 *	Note: For the time being, the key length field is ignored on the
 *	assumption that something will eventually stop the comparison.
 *
 *	WARNING: THIS ROUTINE DOES NOT MAKE A FINAL PASS TO UNSCRAMBLE
 *	PARITIONS OF SIZE TWO.  THE POINTER ARRAY REQUIRES ADDITIONAL
 *	PROCESSING BEFORE IT MAY BE USED!
 *
 **************************************/
	// Explicit interval stacks (instead of recursion); 50 levels is ample
	// because the longer partition is always pushed first (see below).
	SORTP** stack_lower[50];
	SORTP*** sl = stack_lower;

	SORTP** stack_upper[50];
	SORTP*** su = stack_upper;

	*sl++ = pointers;
	*su++ = pointers + size - 1;

	while (sl > stack_lower) {

		// Pick up the next interval off the respective stacks

		SORTP** r = *--sl;
		SORTP** j = *--su;

		// Compute the interval. If two or less, defer the sort to a final pass.

		const SLONG interval = j - r;
		if (interval < 2)
			continue;

		// To guard against pre-ordered data, swap the first record with the
		// middle record. This isn't perfect, but it is cheap.

		SORTP** i = r + interval / 2;
		swap(i, r);

		// Prepare to do the partition. Pick up the first longword of the
		// key to speed up comparisons.

		i = r + 1;
		const ULONG key = **r;

		// From each end of the interval converge to the middle swapping out of
		// parition records as we go. Stop when we converge.
		// NOTE: after the pops above, *su still holds this interval's original
		// upper bound (su points one past the active stack top), so "*su" is
		// used below as the upper guard even though j moves.

		while (true) {
			// Advance i past keys whose first longword is below the pivot
			while (**i < key)
				i++;
			// First longwords equal: fall back to a full multi-longword
			// compare against the pivot record to decide whether to stop
			if (**i == key)
				while (i <= *su) {
					const SORTP* p = *i;
					const SORTP* q = *r;
					ULONG tl = length - 1;
					while (tl && *p == *q) {
						p++;
						q++;
						tl--;
					}
					if (tl && *p > *q)
						break;
					i++;
				}

			// Retreat j past keys whose first longword is above the pivot
			while (**j > key)
				j--;
			if (**j == key)
				while (j != r) {
					const SORTP* p = *j;
					const SORTP* q = *r;
					ULONG tl = length - 1;
					while (tl && *p == *q) {
						p++;
						q++;
						tl--;
					}
					if (tl && *p < *q)
						break;
					j--;
				}
			if (i >= j)
				break;
			swap(i, j);
			i++;
			j--;
		}

		// We have formed two partitions, separated by a slot for the
		// initial record "r". Exchange the record currently in the
		// slot with "r".

		swap(r, j);

		// Finally, stack the two intervals, longest first
		// (pushing the longer one first bounds the stack depth to O(log n))

		i = *su;
		if ((j - r) > (i - j + 1)) {
			*sl++ = r;
			*su++ = j - 1;
			*sl++ = j + 1;
			*su++ = i;
		}
		else {
			*sl++ = j + 1;
			*su++ = i;
			*sl++ = r;
			*su++ = j - 1;
		}
	}
}
|
|
|
|
|
|
|
|
|
2004-03-19 07:14:53 +01:00
|
|
|
static ULONG order(sort_context* scb)
{
/**************************************
 *
 *	o r d e r
 *
 **************************************
 *
 * Functional description
 *	The memoryfull of record pointers have been sorted, but more
 *	records remain, so the run will have to be written to disk.  To
 *	speed this up, re-arrange the records in physical order so they
 *	can be written with a single disk write.
 *
 *	Returns the number of records placed in physical order.
 *
 **************************************/
	sort_record** ptr = scb->scb_first_pointer + 1;	// 1st ptr is low key

	// Last inserted record, also the top of the memory where SORT_RECORDS can
	// be written
	sort_record* output = reinterpret_cast<sort_record*>(scb->scb_last_record);
	sort_ptr_t* lower_limit = reinterpret_cast<sort_ptr_t*>(output);

	// Scratch buffer large enough for one full record (key + back pointer)
	Firebird::HalfStaticArray<ULONG, 1024> record_buffer(*scb->scb_pool);
	SORTP* buffer = record_buffer.getBuffer(scb->scb_longs);
	//(SORTP*) scb->scb_pool->allocate(scb->scb_longs * sizeof(ULONG));

	// Length of the key part of the record
	const SSHORT length = scb->scb_longs - SIZEOF_SR_BCKPTR_IN_LONGS;

	// scb_next_pointer points to the end of pointer memory or the beginning of
	// records
	while (ptr < scb->scb_next_pointer)
	{
		// If the next pointer is null, its record has been eliminated as a
		// duplicate. This is the only easy case.

		SR* record = reinterpret_cast<SR*>(*ptr++);
		if (!record)
			continue;

		// Make record point back to the starting of SR struct,
		// as all scb* pointer point to the key_id locations!
		record =
			reinterpret_cast<SR*>(((SORTP *) record) - SIZEOF_SR_BCKPTR_IN_LONGS);

		// If the lower limit of live records points to a deleted or used record,
		// advance the lower limit

		while (!*(lower_limit)
			&& (lower_limit < (sort_ptr_t*) scb->scb_end_memory))
		{
			lower_limit =
				reinterpret_cast<sort_ptr_t*>(((SORTP *) lower_limit) + scb->scb_longs);
		}

		// If the record we want to move won't interfere with lower active
		// record, just move the record into position

		if (record->sr_sort_record.sort_record_key == (ULONG *) lower_limit) {
			// Record is already at the lower limit: copy its key straight out
			MOVE_32(length, record->sr_sort_record.sort_record_key, output);
			output =
				reinterpret_cast<sort_record*>((SORTP *) output + length);
			continue;
		}

		if (((SORTP *) output) + scb->scb_longs - 1 <= (SORTP *) lower_limit) {
			// Output area doesn't overlap the record at lower_limit:
			// null the bckptr for this record
			record->sr_bckptr = NULL;
			MOVE_32(length, record->sr_sort_record.sort_record_key, output);
			output =
				reinterpret_cast<sort_record*>((SORTP *) output + length);
			continue;
		}

		// There's another record sitting where we want to put our record. Move
		// the next logical record to a temp, move the lower limit record to the
		// next record's old position (adjusting pointers as we go), then move
		// the current record to output.

		MOVE_32(length, (SORTP *) record->sr_sort_record.sort_record_key,
				buffer);

		// Redirect the displaced record's pointer-array slot to its new home
		**((sort_ptr_t***) lower_limit) =
			reinterpret_cast<sort_ptr_t*>(record->sr_sort_record.sort_record_key);
		MOVE_32(scb->scb_longs, lower_limit, record);
		lower_limit = (sort_ptr_t*) ((SORTP *) lower_limit + scb->scb_longs);

		MOVE_32(length, buffer, output);
		output =
			reinterpret_cast<sort_record*>((sort_ptr_t*) ((SORTP *) output + length));
	}

	//delete buffer;

	// Number of records written = bytes of key data emitted / key length
	return (((SORTP *) output) -
			((SORTP *) scb->scb_last_record)) / (scb->scb_longs -
												 SIZEOF_SR_BCKPTR_IN_LONGS);
}
|
|
|
|
|
2007-04-05 11:13:10 +02:00
|
|
|
|
2007-04-03 17:01:37 +02:00
|
|
|
static void order_and_save(sort_context* scb)
{
/**************************************
 *
 *	o r d e r _ a n d _ s a v e
 *
 **************************************
 *
 * Functional description
 *	The memory full of record pointers has been sorted, but more
 *	records remain, so the run will have to be written to scratch file.
 *	If target run can be allocated in contiguous chunk of memory then
 *	just memcpy records into it. Else call more expensive order() to
 *	physically rearrange records in sort space and write its run into
 *	scratch file as one big chunk
 *
 **************************************/
	Database::Checkout dcoHolder(scb->scb_attachment->att_database);

	run_control* run = scb->scb_runs;
	run->run_records = 0;

	sort_record** ptr = scb->scb_first_pointer + 1;	// 1st ptr is low key

	// Count the live records. A null pointer marks a record eliminated as a
	// duplicate. scb_next_pointer points to the end of pointer memory or the
	// beginning of records.
	while (ptr < scb->scb_next_pointer)
	{
		if (!(*ptr++))
			continue;

		run->run_records++;
	}

	// Size of the run on disk: key part only, back pointers are not written
	const ULONG key_length =
		(scb->scb_longs - SIZEOF_SR_BCKPTR_IN_LONGS) * sizeof(ULONG);
	run->run_size = run->run_records * key_length;
	run->run_seek = find_file_space(scb, run->run_size);

	TempSpace* tmpSpace = scb->scb_space;
	char* mem = tmpSpace->inMemory(run->run_seek, run->run_size);

	if (mem)
	{
		// The run fits in a contiguous in-memory chunk of the temp space:
		// copy each live record's key directly, in sorted (pointer) order
		ptr = scb->scb_first_pointer + 1;
		while (ptr < scb->scb_next_pointer)
		{
			SR* record = (SR*) (*ptr++);

			if (!record)
				continue;

			// make record point back to the starting of SR struct,
			// as all scb_*_pointer point to the key_id locations!
			record = (SR*) (((SORTP*) record) - SIZEOF_SR_BCKPTR_IN_LONGS);

			memcpy(mem, record->sr_sort_record.sort_record_key, key_length);
			mem += key_length;
		}
	}
	else
	{
		// No contiguous chunk available: physically rearrange the records in
		// the sort space, then write the whole run with a single call
		order(scb);

		SORT_write_block(scb->scb_status_vector, scb->scb_space,
						 run->run_seek, (UCHAR*) scb->scb_last_record,
						 run->run_size);
	}
}
|
2001-05-23 15:26:42 +02:00
|
|
|
|
2007-04-05 11:13:10 +02:00
|
|
|
|
2004-03-19 07:14:53 +01:00
|
|
|
static void put_run(sort_context* scb)
{
/**************************************
 *
 *	p u t _ r u n
 *
 **************************************
 *
 * Functional description
 *	Memory has been exhausted.  Do a sort on what we have and write
 *	it to the scratch file.  Keep in mind that since duplicate records
 *	may disappear, the number of records in the run may be less than
 *	were sorted.
 *
 **************************************/
	try {

	// Reuse a free run descriptor if one is available, else allocate one
	run_control* run = scb->scb_free_runs;

	if (run) {
		scb->scb_free_runs = run->run_next;
	}
	else {
		run = (run_control*) FB_NEW(*scb->scb_pool) run_control;
	}
	memset(run, 0, sizeof(run_control));

	// Link the new run descriptor at the head of the in-use run list
	run->run_next = scb->scb_runs;
	scb->scb_runs = run;
	run->run_header.rmh_type = RMH_TYPE_RUN;
	run->run_depth = 0;

	// Do the in-core sort. The first phase of duplicate handling will be
	// performed by sort().

	sort(scb);

	// Re-arrange records in physical order so they can be dumped in a single
	// write operation

#ifdef SCROLLABLE_CURSORS
	run->run_records = run->run_max_records = order(scb);
	run->run_cached = 0;

	// Write records to scratch file. Keep track of the number of bytes
	// written, etc.

	run->run_size =
		run->run_records * (scb->scb_longs -
							SIZEOF_SR_BCKPTR_IN_LONGS) * sizeof(ULONG);
	run->run_seek = find_file_space(scb, run->run_size);
	SORT_write_block(scb->scb_status_vector, scb->scb_space,
					 run->run_seek, (UCHAR*) scb->scb_last_record,
					 run->run_size);
#else
	// order_and_save() picks the cheapest strategy: memcpy into contiguous
	// temp space when possible, otherwise order() + one big write
	order_and_save(scb);
#endif
	}
	catch (const Firebird::BadAlloc&) {
		// Translate allocation failure into the sort module's error path
		error_memory(scb);
	}
}
|
|
|
|
|
|
|
|
|
2004-03-19 07:14:53 +01:00
|
|
|
static void sort(sort_context* scb)
{
/**************************************
 *
 *	s o r t
 *
 **************************************
 *
 * Functional description
 *	Set up for and call quick sort.  Quicksort, by design, doesn't
 *	order partitions of length 2, so make a pass thru the data to
 *	straighten out pairs.  While we at it, if duplicate handling has
 *	been requested, detect and handle them.
 *
 **************************************/

	Database::Checkout dcoHolder(scb->scb_attachment->att_database);

	// First, insert a pointer to the high key

	*scb->scb_next_pointer = reinterpret_cast<sort_record*>(high_key);

	// Next, call QuickSort. Keep in mind that the first pointer is the
	// low key and not a record.

	SORTP** j = (SORTP **) (scb->scb_first_pointer) + 1;
	const ULONG n = (SORTP **) (scb->scb_next_pointer) - j;	// calculate # of records

	quick(n, j, scb->scb_longs);

	// Scream through and correct any out of order pairs
	// (quick() leaves partitions of size two unsorted - see its WARNING)
	// hvlad: don't compare user keys against high_key
	while (j < (SORTP **) scb->scb_next_pointer - 1)
	{
		SORTP** i = j;
		j++;
		if (**i >= **j) {
			// First longwords are not strictly ascending: do a full
			// multi-longword compare and swap if truly out of order
			const SORTP* p = *i;
			const SORTP* q = *j;
			ULONG tl = scb->scb_longs - 1;
			while (tl && *p == *q) {
				p++;
				q++;
				tl--;
			}
			if (tl && *p > *q) {
				swap(i, j);
			}
		}
	}

	// If duplicate handling hasn't been requested, we're done

	if (!scb->scb_dup_callback)
		return;

	// Make another pass and eliminate duplicates. It's possible to do this
	// in the same pass as the final ordering, but the logic is complicated enough
	// to screw up register optimizations. Better two fast passes than one
	// slow pass, I suppose. Prove me wrong and win a trip for two to
	// Cleveland, Ohio.

	j = reinterpret_cast<SORTP**>(scb->scb_first_pointer + 1);

	// hvlad: don't compare user keys against high_key
	while (j < ((SORTP **) scb->scb_next_pointer) - 1)
	{
		SORTP** i = j;
		j++;
		if (**i != **j)
			continue;
		const SORTP* p = *i;
		const SORTP* q = *j;

		// Compare only the unique portion of the keys
		ULONG l = scb->scb_unique_length;
		DO_32_COMPARE(p, q, l);
		if (l == 0)
		{
			// Keys match: decode ("un-diddle") both keys before handing the
			// raw records to the caller's duplicate callback
#ifdef SCROLLABLE_CURSORS
			SORT_diddle_key((UCHAR*) *i, scb, false);
			SORT_diddle_key((UCHAR*) *j, scb, false);
#else
			diddle_key((UCHAR*) *i, scb, false);
			diddle_key((UCHAR*) *j, scb, false);
#endif
			if ((*scb->scb_dup_callback) ((const UCHAR*) *i, (const UCHAR*) *j, scb->scb_dup_callback_arg))
			{
				// Callback says eliminate: null both the record's back
				// pointer and its pointer-array slot
				((SORTP ***) (*i))[BACK_OFFSET] = NULL;
				*i = NULL;
			}
			else
			// NOTE(review): only the FIRST statement below is bound to this
			// "else" - the second diddle_key(*j, ...) runs unconditionally.
			// This appears intentional: *j survives in both branches and must
			// always be re-encoded, while *i is re-encoded only when kept.
#ifdef SCROLLABLE_CURSORS
				SORT_diddle_key((UCHAR*) *i, scb, true);
			SORT_diddle_key((UCHAR*) *j, scb, true);
#else
				diddle_key((UCHAR*) *i, scb, true);
			diddle_key((UCHAR*) *j, scb, true);
#endif
		}
	}
}
|
|
|
|
|
2007-04-03 17:01:37 +02:00
|
|
|
|
2007-04-06 04:58:43 +02:00
|
|
|
namespace
{
	// Helper for sort_runs_by_seek(): wraps a run_control* so runs can be
	// held in a Firebird::SortedArray keyed by their seek position in the
	// scratch file. "generate" is the key-extraction hook whose exact
	// signature is required by the SortedArray template.
	class RunSort
	{
	public:
		explicit RunSort(run_control* irun) : run(irun) {}
		RunSort() : run(NULL) {}

		// Key extractor: order runs by starting offset in the scratch file
		static const FB_UINT64 generate(const void*, const RunSort& item)
		{
			return item.run->run_seek;
		}

		run_control* run;
	};
} // namespace
|
2007-04-03 17:01:37 +02:00
|
|
|
|
2007-04-05 11:13:10 +02:00
|
|
|
|
2007-04-03 17:01:37 +02:00
|
|
|
static void sort_runs_by_seek(sort_context* scb, int n)
{
/**************************************
 *
 *	s o r t _ r u n s _ b y _ s e e k
 *
 **************************************
 *
 * Functional description
 *	Sort first n runs by its seek position in scratch file
 *	This allows to order file reads and make merge faster
 *
 **************************************/

	// Collect the first n runs into an array kept sorted by run_seek
	// (RunSort::generate supplies the key)
	Firebird::SortedArray<
		RunSort, Firebird::InlineStorage<RunSort, RUN_GROUP>, FB_UINT64, RunSort
		>
		runs(*scb->scb_pool, n);

	run_control* run;
	for (run = scb->scb_runs; run && n; run = run->run_next, n--) {
		runs.add(RunSort(run));
	}
	// "tail" is the first run NOT being reordered; it stays linked after
	// the reordered prefix
	run_control* tail = run;

	// Relink scb_runs so the first n runs appear in ascending seek order
	// NOTE(review): assumes n >= 1 and scb->scb_runs != NULL - runs.begin()
	// is dereferenced unconditionally; verify all callers guarantee this
	RunSort* rs = runs.begin();
	run = scb->scb_runs = rs->run;
	for (rs++; rs < runs.end(); rs++)
	{
		run->run_next = rs->run;
		run = rs->run;
	}
	run->run_next = tail;
}
|
|
|
|
|
|
|
|
|
2003-03-03 10:22:32 +01:00
|
|
|
#ifdef NOT_USED_OR_REPLACED
|
2001-05-23 15:26:42 +02:00
|
|
|
#ifdef DEBUG
|
2004-03-19 07:14:53 +01:00
|
|
|
static void validate(sort_context* scb)
{
/**************************************
 *
 *	v a l i d a t e
 *
 **************************************
 *
 * Functional description
 *	Validate data structures.
 *
 **************************************/
	// Walk every record pointer (skipping the low-key slot) and check that
	// each record's embedded back pointer still references its slot.
	SORTP** slot = (SORTP **) (scb->scb_first_pointer + 1);
	while (slot < (SORTP **) scb->scb_next_pointer)
	{
		SORTP* rec = *slot;
		if (rec[-SIZEOF_SR_BCKPTR_IN_LONGS] != (SORTP) slot)
		{
			// Back pointer mismatch - the sort space is corrupt
			ISC_STATUS* status = scb->scb_status_vector;
			status[0] = isc_arg_gds;
			status[1] = isc_crrp_data_err;	// Msg360: corruption in data structure
			status[2] = isc_arg_end;
			ERR_punt();
		}
		slot++;
	}
}
|
|
|
|
#endif
|
2003-03-03 10:22:32 +01:00
|
|
|
#endif
|