8
0
mirror of https://github.com/FirebirdSQL/firebird.git synced 2025-01-23 04:43:03 +01:00

Increased the sort record size limit to 1MB. I've kept this limit because of the possible performance implications, but it can be extended or removed at any time.

This commit is contained in:
dimitr 2013-08-21 09:11:57 +00:00
parent 99b1b17f27
commit c1cbd97163
6 changed files with 29 additions and 37 deletions

View File

@ -108,7 +108,7 @@ public:
}
dsc desc;
USHORT length;
ULONG length;
bool intl;
ULONG impure;
Firebird::HalfStaticArray<sort_key_def, 2> keyItems;

View File

@ -1947,10 +1947,9 @@ void OPT_gen_aggregate_distincts(thread_db* tdbb, CompilerScratch* csb, MapNode*
fb_assert(desc->dsc_dtype < FB_NELEM(sort_dtypes));
sort_key->skd_dtype = sort_dtypes[desc->dsc_dtype];
if (!sort_key->skd_dtype)
{
ERR_post(Arg::Gds(isc_invalid_sort_datatype) << Arg::Str(DSC_dtype_tostring(desc->dsc_dtype)));
}
sort_key->skd_length = desc->dsc_length;
@ -2545,7 +2544,7 @@ SortedStream* OPT_gen_sort(thread_db* tdbb, CompilerScratch* csb, const StreamLi
#ifndef WORDS_BIGENDIAN
map_length = ROUNDUP(map_length, sizeof(SLONG));
#endif
const USHORT flag_offset = (USHORT) map_length++;
const ULONG flag_offset = map_length++;
sort_key->skd_offset = flag_offset;
sort_key->skd_dtype = SKD_text;
sort_key->skd_length = 1;
@ -2566,7 +2565,7 @@ SortedStream* OPT_gen_sort(thread_db* tdbb, CompilerScratch* csb, const StreamLi
map_length = FB_ALIGN(map_length, type_alignments[desc->dsc_dtype]);
#endif
sort_key->skd_offset = (USHORT) map_length;
sort_key->skd_offset = map_length;
sort_key->skd_flags = SKD_ascending;
if (*descending)
sort_key->skd_flags |= SKD_descending;
@ -2601,8 +2600,8 @@ SortedStream* OPT_gen_sort(thread_db* tdbb, CompilerScratch* csb, const StreamLi
}
map_length = ROUNDUP(map_length, sizeof(SLONG));
map->keyLength = (USHORT) map_length;
USHORT flag_offset = (USHORT) map_length;
map->keyLength = map_length;
ULONG flag_offset = map_length;
map_length += items - sort->expressions.getCount();
// Now go back and process all to fields involved with the sort. If the
@ -2715,7 +2714,7 @@ SortedStream* OPT_gen_sort(thread_db* tdbb, CompilerScratch* csb, const StreamLi
fb_assert(sort_key->skd_dtype != 0);
if (sort_key->skd_dtype == SKD_varying || sort_key->skd_dtype == SKD_cstring)
{
sort_key->skd_vary_offset = (USHORT) map_length;
sort_key->skd_vary_offset = map_length;
map_length += sizeof(USHORT);
}
}
@ -2726,7 +2725,7 @@ SortedStream* OPT_gen_sort(thread_db* tdbb, CompilerScratch* csb, const StreamLi
// Msg438: sort record size of %ld bytes is too big
}
map->length = (USHORT) map_length;
map->length = map_length;
// That was most unpleasant. Never the less, it's done (except for the debugging).
// All that remains is to build the record source block for the sort.

View File

@ -500,7 +500,7 @@ namespace Jrd
StreamType stream; // stream for field id
dsc desc; // relative descriptor
USHORT flagOffset; // offset of missing flag
ULONG flagOffset; // offset of missing flag
SSHORT fieldId; // id for field (or ID constants)
NestConst<ValueExprNode> node; // expression node
};
@ -515,8 +515,8 @@ namespace Jrd
{
}
USHORT length; // sort record length
USHORT keyLength; // key length
ULONG length; // sort record length
ULONG keyLength; // key length
USHORT flags; // misc sort flags
Firebird::Array<sort_key_def> keyItems; // address of key descriptors
Firebird::Array<Item> items;

View File

@ -232,9 +232,7 @@ Sort* SortedStream::init(thread_db* tdbb) const
}
if (!EVL_field(rpb->rpb_relation, rpb->rpb_record, item->fieldId, from))
{
flag = true;
}
}
*(data + item->flagOffset) = flag ? TRUE : FALSE;
@ -245,7 +243,7 @@ Sort* SortedStream::init(thread_db* tdbb) const
// then want to sort by language dependent order.
if (IS_INTL_DATA(&item->desc) &&
(USHORT)(IPTR) item->desc.dsc_address < m_map->keyLength)
(ULONG)(IPTR) item->desc.dsc_address < m_map->keyLength)
{
INTL_string_to_key(tdbb, INTL_INDEX_TYPE(&item->desc), from, &to,
(m_map->flags & FLAG_UNIQUE ? INTL_KEY_UNIQUE : INTL_KEY_SORT));
@ -325,7 +323,7 @@ void SortedStream::mapData(thread_db* tdbb, jrd_req* request, UCHAR* data) const
// list that contains the data to send back
if (IS_INTL_DATA(&item->desc) &&
(USHORT)(IPTR) item->desc.dsc_address < m_map->keyLength)
(ULONG)(IPTR) item->desc.dsc_address < m_map->keyLength)
{
continue;
}

View File

@ -72,9 +72,7 @@ using namespace Firebird;
void SortOwner::unlinkAll()
{
while (sorts.getCount())
{
delete sorts.pop();
}
}
// The sort buffer size should be just under a multiple of the
@ -161,7 +159,7 @@ namespace
Sort::Sort(Database* dbb,
SortOwner* owner,
USHORT record_length,
ULONG record_length,
size_t keys,
size_t unique_keys,
const sort_key_def* key_description,
@ -585,13 +583,13 @@ void Sort::sort(thread_db* tdbb)
if (allocated < run_count)
{
const USHORT rec_size = m_longs << SHIFTLONG;
const ULONG rec_size = m_longs << SHIFTLONG;
allocSize = MAX_SORT_BUFFER_SIZE * RUN_GROUP;
for (run = m_runs; run; run = run->run_next)
{
if (!run->run_buffer)
{
int mem_size = MIN(allocSize / rec_size, run->run_records) * rec_size;
size_t mem_size = MIN(allocSize / rec_size, run->run_records) * rec_size;
UCHAR* mem = NULL;
try
{
@ -882,16 +880,12 @@ void Sort::diddleKey(UCHAR* record, bool direction)
if (key->skd_dtype == SKD_double)
{
if (*(double*) p == 0)
{
*(double*) p = 0;
}
}
else if (key->skd_dtype == SKD_float)
{
if (*(float*) p == 0)
{
*(float*) p = 0;
}
}
}
@ -1331,7 +1325,7 @@ ULONG Sort::allocate(ULONG n, ULONG chunkSize, bool useFreeSpace)
* Allocate memory for first n runs
*
**************************************/
const USHORT rec_size = m_longs << SHIFTLONG;
const ULONG rec_size = m_longs << SHIFTLONG;
ULONG allocated = 0, count;
run_control* run;
@ -1411,7 +1405,7 @@ void Sort::mergeRuns(USHORT n)
// Make a pass thru the runs allocating buffer space, computing work file
// space requirements, and filling in a vector of streams with run pointers
const USHORT rec_size = m_longs << SHIFTLONG;
const ULONG rec_size = m_longs << SHIFTLONG;
UCHAR* buffer = (UCHAR*) m_first_pointer;
run_control temp_run;
memset(&temp_run, 0, sizeof(run_control));
@ -1435,9 +1429,10 @@ void Sort::mergeRuns(USHORT n)
const USHORT buffers = m_size_memory / rec_size;
USHORT count;
ULONG size = 0;
if (n > allocated) {
if (n > allocated)
size = rec_size * (buffers / (USHORT) (2 * (n - allocated)));
}
for (run = m_runs, count = 0; count < n; run = run->run_next, count++)
{
*m1++ = (run_merge_hdr*) run;
@ -1528,10 +1523,10 @@ void Sort::mergeRuns(USHORT n)
seek = writeBlock(m_space, seek, temp_run.run_buffer, size);
q = reinterpret_cast<sort_record*>(temp_run.run_buffer);
}
count = m_longs;
ULONG longs_count = m_longs;
do {
*q++ = *p++;
} while (--count);
} while (--longs_count);
++temp_run.run_records;
}
@ -1756,7 +1751,7 @@ ULONG Sort::order()
SORTP* buffer = record_buffer.getBuffer(m_longs);
// Length of the key part of the record
const SSHORT length = m_longs - SIZEOF_SR_BCKPTR_IN_LONGS;
const ULONG length = m_longs - SIZEOF_SR_BCKPTR_IN_LONGS;
// m_next_pointer points to the end of pointer memory or the beginning of
// records

View File

@ -93,7 +93,7 @@ struct sort_record
};
const ULONG MAX_SORT_RECORD = 65535; // bytes
const ULONG MAX_SORT_RECORD = 1024 * 1024; // 1MB
// the record struct actually contains the keyids etc, and the back_pointer
// which points to the sort_record structure.
@ -131,8 +131,8 @@ struct sort_key_def
UCHAR skd_dtype; // Data type
UCHAR skd_flags; // Flags
USHORT skd_length; // Length if string
USHORT skd_offset; // Offset from beginning
USHORT skd_vary_offset; // Offset to varying/cstring length
ULONG skd_offset; // Offset from beginning
ULONG skd_vary_offset; // Offset to varying/cstring length
};
@ -212,7 +212,7 @@ class Sort
{
public:
Sort(Database*, SortOwner*,
USHORT, size_t, size_t, const sort_key_def*,
ULONG, size_t, size_t, const sort_key_def*,
FPTR_REJECT_DUP_CALLBACK, void*, FB_UINT64 = 0);
~Sort();
@ -263,7 +263,7 @@ private:
SR* m_last_record; // Address of last record
sort_record** m_first_pointer; // Memory for sort
sort_record** m_next_pointer; // Address for next pointer
USHORT m_longs; // Length of record in longwords
ULONG m_longs; // Length of record in longwords
ULONG m_key_length; // Key length
ULONG m_unique_length; // Unique key length, used when duplicates eliminated
FB_UINT64 m_records; // Number of records