
Remove dependency on jrd_nod in ExprNode

asfernandes 2010-11-14 22:31:42 +00:00
parent 3eb252ec5f
commit 4dea3a42a1
16 changed files with 372 additions and 382 deletions
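
For context, every hunk in this commit follows the same substitution: state that the execution engine used to read from the wrapping legacy jrd_nod (node->nod_impure, node->nod_scale, node->nod_flags together with the nod_quad/nod_double/nod_date/nod_value/nod_invariant constants) moves into the ExprNode hierarchy itself (impureOffset, nodScale, nodFlags with the ExprNode::FLAG_* constants), and the flags formerly private to BoolExprNode are folded into the same field. A minimal stand-alone sketch of that access pattern follows; the Request, ExprNode and CountAggNode definitions here are simplified illustrations, not the real Firebird declarations.

#include <cstddef>
#include <cstdint>
#include <vector>

struct Request
{
    std::vector<std::uint8_t> impureArea;   // per-request scratch memory

    template <typename T>
    T* getImpure(std::size_t offset)        // same idea as jrd_req::getImpure in the diff
    {
        return reinterpret_cast<T*>(impureArea.data() + offset);
    }
};

struct ExprNode
{
    // State that used to live in the wrapping jrd_nod (nod_flags, nod_scale,
    // nod_impure) is now owned by the expression node itself.
    static const unsigned FLAG_DOUBLE = 0x10;   // value as declared in the header diff below

    unsigned     nodFlags = 0;
    signed char  nodScale = 0;
    std::size_t  impureOffset = 0;
};

struct CountAggNode : ExprNode
{
    void aggInit(Request* request) const
    {
        // After the commit: the node reads its own impureOffset directly.
        std::int64_t* counter = request->getImpure<std::int64_t>(impureOffset);
        *counter = 0;
        // Before the commit the same access went through the legacy wrapper:
        //   request->getImpure<impure_value_ex>(node->nod_impure);
    }
};

int main()
{
    Request request;
    request.impureArea.resize(64);          // stands in for the CMP_impure() allocation

    CountAggNode count;
    count.impureOffset = 0;                 // assigned during pass2 in the real code
    count.aggInit(&request);
    return 0;
}

With the state owned by ExprNode, DmlNode no longer needs to carry the jrd_nod pointer at all; in this commit that member and its getNode()/setNode() accessors move down to StmtNode, the one place that still relies on the legacy wrapper.
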

View File

@ -301,15 +301,15 @@ ExprNode* AggNode::pass2(thread_db* tdbb, CompilerScratch* csb)
ExprNode::pass2(tdbb, csb);
dsc desc;
CMP_get_desc(tdbb, csb, node, &desc);
node->nod_impure = CMP_impure(csb, sizeof(impure_value_ex));
getDesc(tdbb, csb, &desc);
impureOffset = CMP_impure(csb, sizeof(impure_value_ex));
return this;
}
void AggNode::aggInit(thread_db* /*tdbb*/, jrd_req* request) const
{
impure_value_ex* impure = request->getImpure<impure_value_ex>(node->nod_impure);
impure_value_ex* impure = request->getImpure<impure_value_ex>(impureOffset);
impure->vlux_count = 0;
if (distinct)
@ -390,7 +390,7 @@ void AggNode::aggFinish(thread_db* /*tdbb*/, jrd_req* request) const
dsc* AggNode::execute(thread_db* tdbb, jrd_req* request) const
{
impure_value_ex* impure = request->getImpure<impure_value_ex>(node->nod_impure);
impure_value_ex* impure = request->getImpure<impure_value_ex>(impureOffset);
if (impure->vlu_blob)
{
@ -521,7 +521,7 @@ void AvgAggNode::getDesc(thread_db* tdbb, CompilerScratch* csb, dsc* desc)
desc->dsc_length = sizeof(SINT64);
desc->dsc_sub_type = 0;
desc->dsc_flags = 0;
node->nod_scale = desc->dsc_scale;
nodScale = desc->dsc_scale;
break;
case dtype_unknown:
@ -546,7 +546,7 @@ void AvgAggNode::getDesc(thread_db* tdbb, CompilerScratch* csb, dsc* desc)
desc->dsc_scale = 0;
desc->dsc_sub_type = 0;
desc->dsc_flags = 0;
node->nod_flags |= nod_double;
nodFlags |= FLAG_DOUBLE;
break;
}
}
@ -555,6 +555,7 @@ ValueExprNode* AvgAggNode::copy(thread_db* tdbb, NodeCopier& copier)
{
AvgAggNode* node = FB_NEW(*tdbb->getDefaultPool()) AvgAggNode(*tdbb->getDefaultPool(),
distinct, dialect1);
node->nodScale = nodScale;
node->arg = copier.copy(tdbb, arg);
return node;
}
@ -562,7 +563,7 @@ ValueExprNode* AvgAggNode::copy(thread_db* tdbb, NodeCopier& copier)
ExprNode* AvgAggNode::pass2(thread_db* tdbb, CompilerScratch* csb)
{
if (dialect1)
node->nod_flags |= nod_double;
nodFlags |= FLAG_DOUBLE;
// We need a second descriptor in the impure area for AVG.
tempImpure = CMP_impure(csb, sizeof(impure_value_ex));
@ -574,7 +575,7 @@ void AvgAggNode::aggInit(thread_db* tdbb, jrd_req* request) const
{
AggNode::aggInit(tdbb, request);
impure_value_ex* impure = request->getImpure<impure_value_ex>(node->nod_impure);
impure_value_ex* impure = request->getImpure<impure_value_ex>(impureOffset);
if (dialect1)
{
@ -585,24 +586,24 @@ void AvgAggNode::aggInit(thread_db* tdbb, jrd_req* request) const
{
// Initialize the result area as an int64. If the field being aggregated is approximate
// numeric, the first call to add will convert the descriptor to double.
impure->make_int64(0, node->nod_scale);
impure->make_int64(0, nodScale);
}
}
void AvgAggNode::aggPass(thread_db* /*tdbb*/, jrd_req* request, dsc* desc) const
{
impure_value_ex* impure = request->getImpure<impure_value_ex>(node->nod_impure);
impure_value_ex* impure = request->getImpure<impure_value_ex>(impureOffset);
++impure->vlux_count;
if (dialect1)
ArithmeticNode::add(desc, impure, node, blr_add);
ArithmeticNode::add(desc, impure, this, blr_add);
else
ArithmeticNode::add2(desc, impure, node, blr_add);
ArithmeticNode::add2(desc, impure, this, blr_add);
}
dsc* AvgAggNode::aggExecute(thread_db* tdbb, jrd_req* request) const
{
impure_value_ex* impure = request->getImpure<impure_value_ex>(node->nod_impure);
impure_value_ex* impure = request->getImpure<impure_value_ex>(impureOffset);
if (!impure->vlux_count)
return NULL;
@ -681,6 +682,7 @@ ValueExprNode* ListAggNode::copy(thread_db* tdbb, NodeCopier& copier)
{
ListAggNode* node = FB_NEW(*tdbb->getDefaultPool()) ListAggNode(*tdbb->getDefaultPool(),
distinct);
node->nodScale = nodScale;
node->arg = copier.copy(tdbb, arg);
node->delimiter = copier.copy(tdbb, delimiter);
return node;
@ -692,14 +694,14 @@ void ListAggNode::aggInit(thread_db* tdbb, jrd_req* request) const
// We don't know here what should be the sub-type and text-type.
// Defer blob creation for when first record is found.
impure_value_ex* impure = request->getImpure<impure_value_ex>(node->nod_impure);
impure_value_ex* impure = request->getImpure<impure_value_ex>(impureOffset);
impure->vlu_blob = NULL;
impure->vlu_desc.dsc_dtype = 0;
}
void ListAggNode::aggPass(thread_db* tdbb, jrd_req* request, dsc* desc) const
{
impure_value_ex* impure = request->getImpure<impure_value_ex>(node->nod_impure);
impure_value_ex* impure = request->getImpure<impure_value_ex>(impureOffset);
if (!impure->vlu_blob)
{
@ -736,7 +738,7 @@ void ListAggNode::aggPass(thread_db* tdbb, jrd_req* request, dsc* desc) const
dsc* ListAggNode::aggExecute(thread_db* tdbb, jrd_req* request) const
{
impure_value_ex* impure = request->getImpure<impure_value_ex>(node->nod_impure);
impure_value_ex* impure = request->getImpure<impure_value_ex>(impureOffset);
if (distinct)
{
@ -807,6 +809,7 @@ ValueExprNode* CountAggNode::copy(thread_db* tdbb, NodeCopier& copier)
{
CountAggNode* node = FB_NEW(*tdbb->getDefaultPool()) CountAggNode(*tdbb->getDefaultPool(),
distinct);
node->nodScale = nodScale;
node->arg = copier.copy(tdbb, arg);
return node;
}
@ -815,19 +818,19 @@ void CountAggNode::aggInit(thread_db* tdbb, jrd_req* request) const
{
AggNode::aggInit(tdbb, request);
impure_value_ex* impure = request->getImpure<impure_value_ex>(node->nod_impure);
impure_value_ex* impure = request->getImpure<impure_value_ex>(impureOffset);
impure->make_long(0);
}
void CountAggNode::aggPass(thread_db* /*tdbb*/, jrd_req* request, dsc* /*desc*/) const
{
impure_value_ex* impure = request->getImpure<impure_value_ex>(node->nod_impure);
impure_value_ex* impure = request->getImpure<impure_value_ex>(impureOffset);
++impure->vlu_misc.vlu_long;
}
dsc* CountAggNode::aggExecute(thread_db* /*tdbb*/, jrd_req* request) const
{
impure_value_ex* impure = request->getImpure<impure_value_ex>(node->nod_impure);
impure_value_ex* impure = request->getImpure<impure_value_ex>(impureOffset);
if (!impure->vlu_desc.dsc_dtype)
return NULL;
@ -921,7 +924,7 @@ void SumAggNode::getDesc(thread_db* tdbb, CompilerScratch* csb, dsc* desc)
case dtype_short:
desc->dsc_dtype = dtype_long;
desc->dsc_length = sizeof(SLONG);
node->nod_scale = desc->dsc_scale;
nodScale = desc->dsc_scale;
desc->dsc_sub_type = 0;
desc->dsc_flags = 0;
return;
@ -929,7 +932,7 @@ void SumAggNode::getDesc(thread_db* tdbb, CompilerScratch* csb, dsc* desc)
case dtype_unknown:
desc->dsc_dtype = dtype_unknown;
desc->dsc_length = 0;
node->nod_scale = 0;
nodScale = 0;
desc->dsc_sub_type = 0;
desc->dsc_flags = 0;
return;
@ -946,7 +949,7 @@ void SumAggNode::getDesc(thread_db* tdbb, CompilerScratch* csb, dsc* desc)
desc->dsc_scale = 0;
desc->dsc_sub_type = 0;
desc->dsc_flags = 0;
node->nod_flags |= nod_double;
nodFlags |= FLAG_DOUBLE;
return;
case dtype_quad:
@ -954,8 +957,8 @@ void SumAggNode::getDesc(thread_db* tdbb, CompilerScratch* csb, dsc* desc)
desc->dsc_length = sizeof(SQUAD);
desc->dsc_sub_type = 0;
desc->dsc_flags = 0;
node->nod_scale = desc->dsc_scale;
node->nod_flags |= nod_quad;
nodScale = desc->dsc_scale;
nodFlags |= FLAG_QUAD;
#ifdef NATIVE_QUAD
return;
#endif
@ -982,14 +985,14 @@ void SumAggNode::getDesc(thread_db* tdbb, CompilerScratch* csb, dsc* desc)
case dtype_int64:
desc->dsc_dtype = dtype_int64;
desc->dsc_length = sizeof(SINT64);
node->nod_scale = desc->dsc_scale;
nodScale = desc->dsc_scale;
desc->dsc_flags = 0;
return;
case dtype_unknown:
desc->dsc_dtype = dtype_unknown;
desc->dsc_length = 0;
node->nod_scale = 0;
nodScale = 0;
desc->dsc_sub_type = 0;
desc->dsc_flags = 0;
return;
@ -1004,7 +1007,7 @@ void SumAggNode::getDesc(thread_db* tdbb, CompilerScratch* csb, dsc* desc)
desc->dsc_scale = 0;
desc->dsc_sub_type = 0;
desc->dsc_flags = 0;
node->nod_flags |= nod_double;
nodFlags |= FLAG_DOUBLE;
return;
case dtype_quad:
@ -1012,8 +1015,8 @@ void SumAggNode::getDesc(thread_db* tdbb, CompilerScratch* csb, dsc* desc)
desc->dsc_length = sizeof(SQUAD);
desc->dsc_sub_type = 0;
desc->dsc_flags = 0;
node->nod_scale = desc->dsc_scale;
node->nod_flags |= nod_quad;
nodScale = desc->dsc_scale;
nodFlags |= FLAG_QUAD;
#ifdef NATIVE_QUAD
return;
#endif
@ -1042,6 +1045,7 @@ ValueExprNode* SumAggNode::copy(thread_db* tdbb, NodeCopier& copier)
{
SumAggNode* node = FB_NEW(*tdbb->getDefaultPool()) SumAggNode(*tdbb->getDefaultPool(),
distinct, dialect1);
node->nodScale = nodScale;
node->arg = copier.copy(tdbb, arg);
return node;
}
@ -1050,7 +1054,7 @@ void SumAggNode::aggInit(thread_db* tdbb, jrd_req* request) const
{
AggNode::aggInit(tdbb, request);
impure_value_ex* impure = request->getImpure<impure_value_ex>(node->nod_impure);
impure_value_ex* impure = request->getImpure<impure_value_ex>(impureOffset);
if (dialect1)
impure->make_long(0);
@ -1058,24 +1062,24 @@ void SumAggNode::aggInit(thread_db* tdbb, jrd_req* request) const
{
// Initialize the result area as an int64. If the field being aggregated is approximate
// numeric, the first call to add will convert the descriptor to double.
impure->make_int64(0, node->nod_scale);
impure->make_int64(0, nodScale);
}
}
void SumAggNode::aggPass(thread_db* /*tdbb*/, jrd_req* request, dsc* desc) const
{
impure_value_ex* impure = request->getImpure<impure_value_ex>(node->nod_impure);
impure_value_ex* impure = request->getImpure<impure_value_ex>(impureOffset);
++impure->vlux_count;
if (dialect1)
ArithmeticNode::add(desc, impure, node, blr_add);
ArithmeticNode::add(desc, impure, this, blr_add);
else
ArithmeticNode::add2(desc, impure, node, blr_add);
ArithmeticNode::add2(desc, impure, this, blr_add);
}
dsc* SumAggNode::aggExecute(thread_db* /*tdbb*/, jrd_req* request) const
{
impure_value_ex* impure = request->getImpure<impure_value_ex>(node->nod_impure);
impure_value_ex* impure = request->getImpure<impure_value_ex>(impureOffset);
if (!impure->vlux_count)
return NULL;
@ -1125,6 +1129,7 @@ ValueExprNode* MaxMinAggNode::copy(thread_db* tdbb, NodeCopier& copier)
{
MaxMinAggNode* node = FB_NEW(*tdbb->getDefaultPool()) MaxMinAggNode(*tdbb->getDefaultPool(),
type);
node->nodScale = nodScale;
node->arg = copier.copy(tdbb, arg);
return node;
}
@ -1133,13 +1138,13 @@ void MaxMinAggNode::aggInit(thread_db* tdbb, jrd_req* request) const
{
AggNode::aggInit(tdbb, request);
impure_value_ex* impure = request->getImpure<impure_value_ex>(node->nod_impure);
impure_value_ex* impure = request->getImpure<impure_value_ex>(impureOffset);
impure->vlu_desc.dsc_dtype = 0;
}
void MaxMinAggNode::aggPass(thread_db* tdbb, jrd_req* request, dsc* desc) const
{
impure_value_ex* impure = request->getImpure<impure_value_ex>(node->nod_impure);
impure_value_ex* impure = request->getImpure<impure_value_ex>(impureOffset);
++impure->vlux_count;
if (!impure->vlu_desc.dsc_dtype)
@ -1156,7 +1161,7 @@ void MaxMinAggNode::aggPass(thread_db* tdbb, jrd_req* request, dsc* desc) const
dsc* MaxMinAggNode::aggExecute(thread_db* /*tdbb*/, jrd_req* request) const
{
impure_value_ex* impure = request->getImpure<impure_value_ex>(node->nod_impure);
impure_value_ex* impure = request->getImpure<impure_value_ex>(impureOffset);
if (!impure->vlux_count)
return NULL;

View File

@ -87,7 +87,7 @@ namespace
bool BoolExprNode::computable(CompilerScratch* csb, SSHORT stream, bool idxUse,
bool allowOnlyCurrentStream)
{
if (flags & FLAG_DEOPTIMIZE)
if (nodFlags & FLAG_DEOPTIMIZE)
return false;
return ExprNode::computable(csb, stream, idxUse, allowOnlyCurrentStream);
@ -99,13 +99,13 @@ BoolExprNode* BoolExprNode::pass2(thread_db* tdbb, CompilerScratch* csb)
ExprNode::pass2(tdbb, csb);
pass2Boolean2(tdbb, csb);
if (flags & FLAG_INVARIANT)
if (nodFlags & FLAG_INVARIANT)
{
// Bind values of invariant nodes to top-level RSE (if present)
if (csb->csb_current_nodes.hasData())
{
LegacyNodeOrRseNode& topRseNode = csb->csb_current_nodes[0];
RseOrExprNode& topRseNode = csb->csb_current_nodes[0];
fb_assert(topRseNode.rseNode);
if (!topRseNode.rseNode->rse_invariants)
@ -201,7 +201,7 @@ BoolExprNode* BinaryBoolNode::copy(thread_db* tdbb, NodeCopier& copier)
{
BinaryBoolNode* node = FB_NEW(*tdbb->getDefaultPool()) BinaryBoolNode(*tdbb->getDefaultPool(),
blrOp);
node->flags = flags;
node->nodFlags = nodFlags;
node->arg1 = arg1->copy(tdbb, copier);
node->arg2 = arg2->copy(tdbb, copier);
return node;
@ -562,7 +562,7 @@ BoolExprNode* ComparativeBoolNode::copy(thread_db* tdbb, NodeCopier& copier)
{
ComparativeBoolNode* node = FB_NEW(*tdbb->getDefaultPool()) ComparativeBoolNode(
*tdbb->getDefaultPool(), blrOp);
node->flags = flags;
node->nodFlags = nodFlags;
node->arg1 = copier.copy(tdbb, arg1);
node->arg2 = copier.copy(tdbb, arg2);
@ -591,7 +591,7 @@ BoolExprNode* ComparativeBoolNode::pass1(thread_db* tdbb, CompilerScratch* csb)
if (invariantCheck)
{
// We need to take care of invariantness expressions to be able to pre-compile the pattern.
flags |= FLAG_INVARIANT;
nodFlags |= FLAG_INVARIANT;
csb->csb_current_nodes.push(this);
}
@ -606,11 +606,11 @@ BoolExprNode* ComparativeBoolNode::pass1(thread_db* tdbb, CompilerScratch* csb)
// If there is no top-level RSE present and patterns are not constant, unmark node as invariant
// because it may be dependent on data or variables.
if ((flags & FLAG_INVARIANT) &&
if ((nodFlags & FLAG_INVARIANT) &&
(!ExprNode::is<LiteralNode>(arg2.getObject()) ||
(arg3 && !ExprNode::is<LiteralNode>(arg3.getObject()))))
{
const LegacyNodeOrRseNode* ctx_node, *end;
const RseOrExprNode* ctx_node, *end;
for (ctx_node = csb->csb_current_nodes.begin(), end = csb->csb_current_nodes.end();
ctx_node != end; ++ctx_node)
@ -620,7 +620,7 @@ BoolExprNode* ComparativeBoolNode::pass1(thread_db* tdbb, CompilerScratch* csb)
}
if (ctx_node >= end)
flags &= ~FLAG_INVARIANT;
nodFlags &= ~FLAG_INVARIANT;
}
}
@ -629,7 +629,7 @@ BoolExprNode* ComparativeBoolNode::pass1(thread_db* tdbb, CompilerScratch* csb)
void ComparativeBoolNode::pass2Boolean1(thread_db* /*tdbb*/, CompilerScratch* csb)
{
if (flags & FLAG_INVARIANT)
if (nodFlags & FLAG_INVARIANT)
csb->csb_invariants.push(&impureOffset);
}
@ -647,8 +647,8 @@ void ComparativeBoolNode::pass2Boolean2(thread_db* tdbb, CompilerScratch* csb)
if (DTYPE_IS_DATE(descriptor_c.dsc_dtype))
{
arg1->nod_flags |= nod_date;
arg2->nod_flags |= nod_date;
arg1->asExpr()->nodFlags |= FLAG_DATE;
arg2->asExpr()->nodFlags |= FLAG_DATE;
}
}
@ -663,11 +663,11 @@ void ComparativeBoolNode::pass2Boolean2(thread_db* tdbb, CompilerScratch* csb)
CMP_get_desc(tdbb, csb, arg2, &descriptor_b);
if (DTYPE_IS_DATE(descriptor_a.dsc_dtype))
arg2->nod_flags |= nod_date;
arg2->asExpr()->nodFlags |= FLAG_DATE;
else if (DTYPE_IS_DATE(descriptor_b.dsc_dtype))
arg1->nod_flags |= nod_date;
arg1->asExpr()->nodFlags |= FLAG_DATE;
if (flags & FLAG_INVARIANT)
if (nodFlags & FLAG_INVARIANT)
{
// This may currently happen for nod_like, nod_contains and nod_similar
impureOffset = CMP_impure(csb, sizeof(impure_value));
@ -694,7 +694,7 @@ bool ComparativeBoolNode::execute(thread_db* tdbb, jrd_req* request) const
force_equal |= request->req_flags & req_same_tx_upd;
// Currently only nod_like, nod_contains, nod_starts and nod_similar may be marked invariant
if (flags & FLAG_INVARIANT)
if (nodFlags & FLAG_INVARIANT)
{
impure_value* impure = request->getImpure<impure_value>(impureOffset);
@ -935,7 +935,7 @@ bool ComparativeBoolNode::stringBoolean(thread_db* tdbb, jrd_req* request, dsc*
if (request->req_flags & req_null)
{
if (flags & FLAG_INVARIANT)
if (nodFlags & FLAG_INVARIANT)
{
impure_value* impure = request->getImpure<impure_value>(impureOffset);
impure->vlu_flags |= VLU_computed;
@ -968,7 +968,7 @@ bool ComparativeBoolNode::stringBoolean(thread_db* tdbb, jrd_req* request, dsc*
PatternMatcher* evaluator;
if (flags & FLAG_INVARIANT)
if (nodFlags & FLAG_INVARIANT)
{
impure_value* impure = request->getImpure<impure_value>(impureOffset);
@ -1014,7 +1014,7 @@ bool ComparativeBoolNode::stringBoolean(thread_db* tdbb, jrd_req* request, dsc*
ret_val = evaluator->result();
if (!(flags & FLAG_INVARIANT))
if (!(nodFlags & FLAG_INVARIANT))
delete evaluator;
break;
@ -1025,7 +1025,7 @@ bool ComparativeBoolNode::stringBoolean(thread_db* tdbb, jrd_req* request, dsc*
{
PatternMatcher* evaluator;
if (flags & FLAG_INVARIANT)
if (nodFlags & FLAG_INVARIANT)
{
impure_value* impure = request->getImpure<impure_value>(impureOffset);
if (!(impure->vlu_flags & VLU_computed))
@ -1068,7 +1068,7 @@ bool ComparativeBoolNode::stringBoolean(thread_db* tdbb, jrd_req* request, dsc*
ret_val = evaluator->result();
if (!(flags & FLAG_INVARIANT))
if (!(nodFlags & FLAG_INVARIANT))
delete evaluator;
break;
@ -1093,7 +1093,7 @@ bool ComparativeBoolNode::stringFunction(thread_db* tdbb, jrd_req* request,
// Handle contains and starts
if (blrOp == blr_containing || blrOp == blr_starting)
{
if (flags & FLAG_INVARIANT)
if (nodFlags & FLAG_INVARIANT)
{
impure_value* impure = request->getImpure<impure_value>(impureOffset);
PatternMatcher* evaluator;
@ -1145,7 +1145,7 @@ bool ComparativeBoolNode::stringFunction(thread_db* tdbb, jrd_req* request,
dsc* desc = EVL_expr(tdbb, arg3);
if (request->req_flags & req_null)
{
if (flags & FLAG_INVARIANT)
if (nodFlags & FLAG_INVARIANT)
{
impure_value* impure = request->getImpure<impure_value>(impureOffset);
impure->vlu_flags |= VLU_computed;
@ -1174,7 +1174,7 @@ bool ComparativeBoolNode::stringFunction(thread_db* tdbb, jrd_req* request,
}
}
if (flags & FLAG_INVARIANT)
if (nodFlags & FLAG_INVARIANT)
{
impure_value* impure = request->getImpure<impure_value>(impureOffset);
PatternMatcher* evaluator;
@ -1381,7 +1381,7 @@ BoolExprNode* MissingBoolNode::copy(thread_db* tdbb, NodeCopier& copier)
{
MissingBoolNode* node = FB_NEW(*tdbb->getDefaultPool()) MissingBoolNode(
*tdbb->getDefaultPool());
node->flags = flags;
node->nodFlags = nodFlags;
node->arg = copier.copy(tdbb, arg);
return node;
}
@ -1457,7 +1457,7 @@ void NotBoolNode::genBlr(DsqlCompilerScratch* dsqlScratch)
BoolExprNode* NotBoolNode::copy(thread_db* tdbb, NodeCopier& copier)
{
NotBoolNode* node = FB_NEW(*tdbb->getDefaultPool()) NotBoolNode(*tdbb->getDefaultPool());
node->flags = flags;
node->nodFlags = nodFlags;
node->arg = arg->copy(tdbb, copier);
return node;
}
@ -1469,9 +1469,9 @@ BoolExprNode* NotBoolNode::pass1(thread_db* tdbb, CompilerScratch* csb)
if (rseBoolean)
{
if (rseBoolean->blrOp == blr_ansi_any)
rseBoolean->flags |= FLAG_DEOPTIMIZE | FLAG_ANSI_NOT;
rseBoolean->nodFlags |= FLAG_DEOPTIMIZE | FLAG_ANSI_NOT;
else if (rseBoolean->blrOp == blr_ansi_all)
rseBoolean->flags |= FLAG_ANSI_NOT;
rseBoolean->nodFlags |= FLAG_ANSI_NOT;
}
return BoolExprNode::pass1(tdbb, csb);
@ -1685,7 +1685,7 @@ BoolExprNode* RseBoolNode::copy(thread_db* tdbb, NodeCopier& copier)
{
RseBoolNode* node = FB_NEW(*tdbb->getDefaultPool()) RseBoolNode(
*tdbb->getDefaultPool(), blrOp);
node->flags = flags;
node->nodFlags = nodFlags;
node->rse = copier.copy(tdbb, rse);
return node;
@ -1701,14 +1701,14 @@ BoolExprNode* RseBoolNode::pass1(thread_db* tdbb, CompilerScratch* csb)
if (newNode)
return newNode->pass1(tdbb, csb);
flags |= FLAG_DEOPTIMIZE;
nodFlags |= FLAG_DEOPTIMIZE;
}
// fall into
case blr_ansi_any:
if (flags & FLAG_DEOPTIMIZE)
if (nodFlags & FLAG_DEOPTIMIZE)
{
flags &= ~FLAG_DEOPTIMIZE;
nodFlags &= ~FLAG_DEOPTIMIZE;
fb_assert(rse->nod_type == nod_class_recsrcnode_jrd);
RseNode* rseNode = reinterpret_cast<RseNode*>(rse->nod_arg[0]);
@ -1726,7 +1726,7 @@ BoolExprNode* RseBoolNode::pass1(thread_db* tdbb, CompilerScratch* csb)
// Deoptimize the injected boolean of a quantified predicate
// when it's necessary. ALL predicate does not require an index scan.
// This fixes bug SF #543106.
boolean->flags |= FLAG_DEOPTIMIZE;
boolean->nodFlags |= FLAG_DEOPTIMIZE;
}
}
// fall into
@ -1754,7 +1754,7 @@ void RseBoolNode::pass2Boolean1(thread_db* tdbb, CompilerScratch* csb)
if (!(rseNode->flags & RseNode::FLAG_VARIANT))
{
flags |= FLAG_INVARIANT;
nodFlags |= FLAG_INVARIANT;
csb->csb_invariants.push(&impureOffset);
}
@ -1767,7 +1767,7 @@ void RseBoolNode::pass2Boolean2(thread_db* tdbb, CompilerScratch* csb)
RseNode* rseNode = reinterpret_cast<RseNode*>(rse->nod_arg[0]);
fb_assert(rseNode->type == RseNode::TYPE);
if (flags & FLAG_INVARIANT)
if (nodFlags & FLAG_INVARIANT)
impureOffset = CMP_impure(csb, sizeof(impure_value));
rsb = CMP_post_rse(tdbb, csb, rseNode);
@ -1779,7 +1779,7 @@ void RseBoolNode::pass2Boolean2(thread_db* tdbb, CompilerScratch* csb)
if (blrOp == blr_ansi_any || blrOp == blr_ansi_all)
{
const bool ansiAny = blrOp == blr_ansi_any;
const bool ansiNot = flags & FLAG_ANSI_NOT;
const bool ansiNot = nodFlags & FLAG_ANSI_NOT;
FilteredStream* const filter = static_cast<FilteredStream*>(rsb.getObject());
filter->setAnyBoolean(rseNode->rse_boolean, ansiAny, ansiNot);
}
@ -1792,7 +1792,7 @@ bool RseBoolNode::execute(thread_db* tdbb, jrd_req* request) const
USHORT* invariant_flags;
impure_value* impure;
if (flags & FLAG_INVARIANT)
if (nodFlags & FLAG_INVARIANT)
{
impure = request->getImpure<impure_value>(impureOffset);
invariant_flags = &impure->vlu_flags;
@ -1823,7 +1823,7 @@ bool RseBoolNode::execute(thread_db* tdbb, jrd_req* request) const
// If this is an invariant node, save the return value.
if (flags & FLAG_INVARIANT)
if (nodFlags & FLAG_INVARIANT)
{
*invariant_flags |= VLU_computed;

File diff suppressed because it is too large

View File

@ -60,8 +60,8 @@ public:
virtual dsc* execute(thread_db* tdbb, jrd_req* request) const;
// add and add2 are used in somewhat obscure way in aggregation.
static dsc* add(const dsc* desc, impure_value* value, const jrd_nod* node, UCHAR blrOp);
static dsc* add2(const dsc* desc, impure_value* value, const jrd_nod* node, UCHAR blrOp);
static dsc* add(const dsc* desc, impure_value* value, const ValueExprNode* node, UCHAR blrOp);
static dsc* add2(const dsc* desc, impure_value* value, const ValueExprNode* node, UCHAR blrOp);
private:
dsc* multiply(const dsc* desc, impure_value* value) const;

View File

@ -206,32 +206,14 @@ class DmlNode : public Node
{
public:
explicit DmlNode(MemoryPool& pool)
: Node(pool),
node(NULL)
: Node(pool)
{
}
jrd_nod* getNode()
{
return node;
}
void setNode(jrd_nod* value)
{
node = value;
}
public:
virtual DmlNode* pass1(thread_db* tdbb, CompilerScratch* csb, jrd_nod* aNode);
virtual DmlNode* pass2(thread_db* tdbb, CompilerScratch* csb, jrd_nod* aNode);
public:
virtual void genBlr(DsqlCompilerScratch* dsqlScratch) = 0;
virtual DmlNode* pass1(thread_db* tdbb, CompilerScratch* csb) = 0;
virtual DmlNode* pass2(thread_db* tdbb, CompilerScratch* csb) = 0;
protected:
NestConst<jrd_nod> node;
};
@ -296,9 +278,24 @@ public:
TYPE_VARIABLE
};
// Generic flags.
static const unsigned FLAG_INVARIANT = 0x01; // Node is recognized as being invariant.
// Boolean flags.
static const unsigned FLAG_DEOPTIMIZE = 0x02; // Boolean which requires deoptimization.
static const unsigned FLAG_ANSI_NOT = 0x04; // ANY/ALL predicate is prefixed with a NOT one.
// Value flags.
static const unsigned FLAG_QUAD = 0x08; // Compute in quad (default is long).
static const unsigned FLAG_DOUBLE = 0x10;
static const unsigned FLAG_DATE = 0x20;
static const unsigned FLAG_VALUE = 0x40; // Full value area required in impure space.
explicit ExprNode(Type aType, MemoryPool& pool)
: DmlNode(pool),
type(aType),
nodFlags(0),
impureOffset(0),
dsqlCompatDialectVerb(NULL),
dsqlChildNodes(pool),
jrdChildNodes(pool)
@ -440,6 +437,8 @@ protected:
public:
const Type type;
unsigned nodFlags;
ULONG impureOffset;
const char* dsqlCompatDialectVerb;
Firebird::Array<dsql_nod**> dsqlChildNodes;
Firebird::Array<JrdNode> jrdChildNodes;
@ -448,14 +447,8 @@ public:
class BoolExprNode : public ExprNode
{
public:
static const unsigned FLAG_DEOPTIMIZE = 0x1; // Boolean which requires deoptimization.
static const unsigned FLAG_ANSI_NOT = 0x2; // ANY/ALL predicate is prefixed with a NOT one.
static const unsigned FLAG_INVARIANT = 0x4; // Node is recognized as being invariant.
BoolExprNode(Type aType, MemoryPool& pool)
: ExprNode(aType, pool),
impureOffset(0),
flags(0)
: ExprNode(aType, pool)
{
}
@ -486,17 +479,14 @@ public:
virtual BoolExprNode* copy(thread_db* tdbb, NodeCopier& copier) = 0;
virtual bool execute(thread_db* tdbb, jrd_req* request) const = 0;
public:
ULONG impureOffset;
unsigned flags;
};
class ValueExprNode : public ExprNode
{
public:
ValueExprNode(Type aType, MemoryPool& pool)
: ExprNode(aType, pool)
: ExprNode(aType, pool),
nodScale(0)
{
}
@ -518,6 +508,9 @@ public:
virtual void getDesc(thread_db* tdbb, CompilerScratch* csb, dsc* desc) = 0;
virtual ValueExprNode* copy(thread_db* tdbb, NodeCopier& copier) = 0;
virtual dsc* execute(thread_db* tdbb, jrd_req* request) const = 0;
public:
SCHAR nodScale;
};
class AggNode : public TypedNode<ValueExprNode, ExprNode::TYPE_AGGREGATE>
@ -698,16 +691,30 @@ class StmtNode : public DmlNode
{
public:
explicit StmtNode(MemoryPool& pool)
: DmlNode(pool)
: DmlNode(pool),
node(NULL)
{
}
public:
jrd_nod* getNode()
{
return node;
}
void setNode(jrd_nod* value)
{
node = value;
}
virtual void pass2Cursor(RseNode*& /*rsePtr*/, Cursor**& /*cursorPtr*/)
{
}
virtual const jrd_nod* execute(thread_db* tdbb, jrd_req* request) const = 0;
protected:
NestConst<jrd_nod> node;
};

View File

@ -53,22 +53,6 @@ using namespace Jrd;
namespace Jrd {
DmlNode* DmlNode::pass1(thread_db* tdbb, CompilerScratch* csb, jrd_nod* aNode)
{
node = aNode;
return pass1(tdbb, csb);
}
DmlNode* DmlNode::pass2(thread_db* tdbb, CompilerScratch* csb, jrd_nod* aNode)
{
node = aNode;
return pass2(tdbb, csb);
}
//--------------------
StmtNode* SavepointEncloseNode::make(MemoryPool& pool, DsqlCompilerScratch* dsqlScratch, StmtNode* node)
{
if (dsqlScratch->errorHandlers)

View File

@ -118,7 +118,7 @@ void DenseRankWinNode::aggInit(thread_db* tdbb, jrd_req* request) const
{
AggNode::aggInit(tdbb, request);
impure_value_ex* impure = request->getImpure<impure_value_ex>(node->nod_impure);
impure_value_ex* impure = request->getImpure<impure_value_ex>(impureOffset);
impure->make_int64(0, 0);
}
@ -128,7 +128,7 @@ void DenseRankWinNode::aggPass(thread_db* /*tdbb*/, jrd_req* /*request*/, dsc* /
dsc* DenseRankWinNode::aggExecute(thread_db* /*tdbb*/, jrd_req* request) const
{
impure_value_ex* impure = request->getImpure<impure_value_ex>(node->nod_impure);
impure_value_ex* impure = request->getImpure<impure_value_ex>(impureOffset);
++impure->vlu_misc.vlu_int64;
return &impure->vlu_desc;
}
@ -178,20 +178,20 @@ void RankWinNode::aggInit(thread_db* tdbb, jrd_req* request) const
{
AggNode::aggInit(tdbb, request);
impure_value_ex* impure = request->getImpure<impure_value_ex>(node->nod_impure);
impure_value_ex* impure = request->getImpure<impure_value_ex>(impureOffset);
impure->make_int64(1, 0);
impure->vlux_count = 0;
}
void RankWinNode::aggPass(thread_db* /*tdbb*/, jrd_req* request, dsc* /*desc*/) const
{
impure_value_ex* impure = request->getImpure<impure_value_ex>(node->nod_impure);
impure_value_ex* impure = request->getImpure<impure_value_ex>(impureOffset);
++impure->vlux_count;
}
dsc* RankWinNode::aggExecute(thread_db* tdbb, jrd_req* request) const
{
impure_value_ex* impure = request->getImpure<impure_value_ex>(node->nod_impure);
impure_value_ex* impure = request->getImpure<impure_value_ex>(impureOffset);
dsc temp;
temp.makeInt64(0, &impure->vlu_misc.vlu_int64);
@ -243,7 +243,7 @@ void RowNumberWinNode::aggInit(thread_db* tdbb, jrd_req* request) const
{
AggNode::aggInit(tdbb, request);
impure_value_ex* impure = request->getImpure<impure_value_ex>(node->nod_impure);
impure_value_ex* impure = request->getImpure<impure_value_ex>(impureOffset);
impure->make_int64(0, 0);
}
@ -253,13 +253,13 @@ void RowNumberWinNode::aggPass(thread_db* /*tdbb*/, jrd_req* /*request*/, dsc* /
dsc* RowNumberWinNode::aggExecute(thread_db* /*tdbb*/, jrd_req* request) const
{
impure_value_ex* impure = request->getImpure<impure_value_ex>(node->nod_impure);
impure_value_ex* impure = request->getImpure<impure_value_ex>(impureOffset);
return &impure->vlu_desc;
}
dsc* RowNumberWinNode::winPass(thread_db* /*tdbb*/, jrd_req* request, SlidingWindow* /*window*/) const
{
impure_value_ex* impure = request->getImpure<impure_value_ex>(node->nod_impure);
impure_value_ex* impure = request->getImpure<impure_value_ex>(impureOffset);
++impure->vlu_misc.vlu_int64;
return &impure->vlu_desc;
}
@ -304,7 +304,7 @@ void LagLeadWinNode::aggInit(thread_db* tdbb, jrd_req* request) const
{
AggNode::aggInit(tdbb, request);
impure_value_ex* impure = request->getImpure<impure_value_ex>(node->nod_impure);
impure_value_ex* impure = request->getImpure<impure_value_ex>(impureOffset);
impure->make_int64(0, 0);
}

View File

@ -1910,7 +1910,7 @@ bool OptimizerRetrieval::matchBoolean(IndexScratch* indexScratch, BoolExprNode*
CMP_get_desc(tdbb, csb, match, &desc1);
CMP_get_desc(tdbb, csb, value, &desc2);
if (!BTR_types_comparable(desc1, desc2, value->nod_flags))
if (!BTR_types_comparable(desc1, desc2, value->asExpr()->nodFlags))
return false;
// if the indexed column is of type int64, we need to inject an
@ -1925,24 +1925,24 @@ bool OptimizerRetrieval::matchBoolean(IndexScratch* indexScratch, BoolExprNode*
CastNode* cast = FB_NEW(*tdbb->getDefaultPool()) CastNode(*tdbb->getDefaultPool());
cast->source = value;
cast->format = format;
cast->impureOffset = CMP_impure(csb, sizeof(impure_value));
value = PAR_make_node(tdbb, 1);
value->nod_type = nod_class_exprnode_jrd;
value->nod_count = 0;
value->nod_arg[0] = reinterpret_cast<jrd_nod*>(cast);
value->nod_impure = CMP_impure(csb, sizeof(impure_value));
if (value2)
{
cast = FB_NEW(*tdbb->getDefaultPool()) CastNode(*tdbb->getDefaultPool());
cast->source = value2;
cast->format = format;
cast->impureOffset = CMP_impure(csb, sizeof(impure_value));
value2 = PAR_make_node(tdbb, 1);
value2->nod_type = nod_class_exprnode_jrd;
value2->nod_count = 0;
value2->nod_arg[0] = reinterpret_cast<jrd_nod*>(cast);
value2->nod_impure = CMP_impure(csb, sizeof(impure_value));
}
}
}

View File

@ -75,7 +75,7 @@ void SortNode::pass1(thread_db* tdbb, CompilerScratch* csb)
void SortNode::pass2(thread_db* tdbb, CompilerScratch* csb)
{
for (NestConst<jrd_nod>* i = expressions.begin(); i != expressions.end(); ++i)
(*i)->nod_flags |= nod_value;
(*i)->asExpr()->nodFlags |= ExprNode::FLAG_VALUE;
for (NestConst<jrd_nod>* i = expressions.begin(); i != expressions.end(); ++i)
CMP_pass2(tdbb, csb, *i, NULL);
@ -1489,7 +1489,7 @@ void RseNode::pass1(thread_db* tdbb, CompilerScratch* csb, jrd_rel* /*view*/)
bool topLevelRse = true;
for (LegacyNodeOrRseNode* node = csb->csb_current_nodes.begin();
for (RseOrExprNode* node = csb->csb_current_nodes.begin();
node != csb->csb_current_nodes.end(); ++node)
{
if (node->rseNode)
@ -2328,7 +2328,7 @@ static void genDeliverUnmapped(thread_db* tdbb, BoolExprNodeStack* deliverStack,
deliverNode = newMissingNode;
}
deliverNode->flags = boolean->flags;
deliverNode->nodFlags = boolean->nodFlags;
deliverNode->impureOffset = boolean->impureOffset;
bool okNode = true;

View File

@ -2124,19 +2124,19 @@ bool BTR_types_comparable(const dsc& target, const dsc& source, const int flags)
if (target.dsc_dtype == dtype_sql_date)
{
return (DTYPE_IS_TEXT(source.dsc_dtype) || source.dsc_dtype == dtype_sql_date ||
(flags & nod_date));
(flags & ExprNode::FLAG_DATE));
}
if (target.dsc_dtype == dtype_sql_time)
{
return (DTYPE_IS_TEXT(source.dsc_dtype) || source.dsc_dtype == dtype_sql_time ||
(flags & nod_date));
(flags & ExprNode::FLAG_DATE));
}
if (target.dsc_dtype == dtype_timestamp)
{
return (DTYPE_IS_TEXT(source.dsc_dtype) || DTYPE_IS_DATE(source.dsc_dtype) ||
(flags & nod_date));
(flags & ExprNode::FLAG_DATE));
}
fb_assert(DTYPE_IS_BLOB(target.dsc_dtype));

View File

@ -815,9 +815,10 @@ jrd_nod* NodeCopier::copy(thread_db* tdbb, jrd_nod* input)
node = input;
else
{
copy->nodFlags = exprNode->nodFlags;
node = PAR_make_node(tdbb, 1);
node->nod_type = input->nod_type;
node->nod_flags = input->nod_flags;
node->nod_count = input->nod_count;
node->nod_arg[0] = reinterpret_cast<jrd_nod*>(copy);
}
@ -889,7 +890,6 @@ jrd_nod* NodeCopier::copy(thread_db* tdbb, jrd_nod* input)
case nod_dcl_cursor:
node = PAR_make_node(tdbb, e_dcl_cur_length);
node->nod_count = input->nod_count;
node->nod_flags = input->nod_flags;
node->nod_type = input->nod_type;
node->nod_arg[e_dcl_cur_rse] = copy(tdbb, input->nod_arg[e_dcl_cur_rse]);
node->nod_arg[e_dcl_cur_refs] = copy(tdbb, input->nod_arg[e_dcl_cur_refs]);
@ -899,7 +899,6 @@ jrd_nod* NodeCopier::copy(thread_db* tdbb, jrd_nod* input)
case nod_cursor_stmt:
node = PAR_make_node(tdbb, e_cursor_stmt_length);
node->nod_count = input->nod_count;
node->nod_flags = input->nod_flags;
node->nod_type = input->nod_type;
node->nod_arg[e_cursor_stmt_op] = input->nod_arg[e_cursor_stmt_op];
node->nod_arg[e_cursor_stmt_number] = input->nod_arg[e_cursor_stmt_number];
@ -917,7 +916,6 @@ jrd_nod* NodeCopier::copy(thread_db* tdbb, jrd_nod* input)
node = PAR_make_node(tdbb, args);
node->nod_count = input->nod_count;
node->nod_type = input->nod_type;
node->nod_flags = input->nod_flags;
jrd_nod** arg1 = input->nod_arg;
jrd_nod** arg2 = node->nod_arg;
@ -1199,7 +1197,7 @@ void CMP_mark_variant(CompilerScratch* csb, USHORT stream)
if (csb->csb_current_nodes.isEmpty())
return;
for (LegacyNodeOrRseNode* node = csb->csb_current_nodes.end() - 1;
for (RseOrExprNode* node = csb->csb_current_nodes.end() - 1;
node != csb->csb_current_nodes.begin(); --node)
{
if (node->rseNode)
@ -1208,13 +1206,8 @@ void CMP_mark_variant(CompilerScratch* csb, USHORT stream)
break;
node->rseNode->flags |= RseNode::FLAG_VARIANT;
}
else if (node->boolExprNode)
node->boolExprNode->flags &= ~BoolExprNode::FLAG_INVARIANT;
else
{
fb_assert(node->legacyNode->nod_type != nod_class_recsrcnode_jrd);
node->legacyNode->nod_flags &= ~nod_invariant;
}
else if (node->exprNode)
node->exprNode->nodFlags &= ~ExprNode::FLAG_INVARIANT;
}
}
@ -1352,10 +1345,17 @@ jrd_nod* CMP_pass1(thread_db* tdbb, CompilerScratch* csb, jrd_nod* node)
return node;
case nod_class_exprnode_jrd:
{
ExprNode* exprNode = reinterpret_cast<ExprNode*>(node->nod_arg[0]);
node->nod_arg[0] = reinterpret_cast<jrd_nod*>(exprNode->pass1(tdbb, csb));
}
return node;
case nod_class_stmtnode_jrd:
{
DmlNode* dmlNode = reinterpret_cast<DmlNode*>(node->nod_arg[0]);
node->nod_arg[0] = reinterpret_cast<jrd_nod*>(dmlNode->pass1(tdbb, csb, node));
StmtNode* stmtNode = reinterpret_cast<StmtNode*>(node->nod_arg[0]);
stmtNode->setNode(node);
node->nod_arg[0] = reinterpret_cast<jrd_nod*>(stmtNode->pass1(tdbb, csb));
}
return node;
@ -2161,9 +2161,37 @@ jrd_nod* CMP_pass2(thread_db* tdbb, CompilerScratch* csb, jrd_nod* const node, j
break;
case nod_class_exprnode_jrd:
{
ExprNode* exprNode = reinterpret_cast<ExprNode*>(node->nod_arg[0])->pass2(tdbb, csb);
node->nod_arg[0] = reinterpret_cast<jrd_nod*>(exprNode);
// Bind values of invariant nodes to top-level RSE (if present)
if (exprNode->nodFlags & ExprNode::FLAG_INVARIANT)
{
if (csb->csb_current_nodes.hasData())
{
RseOrExprNode& topRseNode = csb->csb_current_nodes[0];
fb_assert(topRseNode.rseNode);
if (!topRseNode.rseNode->rse_invariants)
{
topRseNode.rseNode->rse_invariants =
FB_NEW(*tdbb->getDefaultPool()) VarInvariantArray(*tdbb->getDefaultPool());
}
topRseNode.rseNode->rse_invariants->add(exprNode->impureOffset);
}
}
break;
}
case nod_class_stmtnode_jrd:
node->nod_arg[0] = reinterpret_cast<jrd_nod*>(
reinterpret_cast<DmlNode*>(node->nod_arg[0])->pass2(tdbb, csb, node));
{
StmtNode* stmtNode = reinterpret_cast<StmtNode*>(node->nod_arg[0]);
stmtNode->setNode(node);
node->nod_arg[0] = reinterpret_cast<jrd_nod*>(stmtNode->pass2(tdbb, csb));
}
break;
default:
@ -2171,24 +2199,6 @@ jrd_nod* CMP_pass2(thread_db* tdbb, CompilerScratch* csb, jrd_nod* const node, j
break;
}
// Bind values of invariant nodes to top-level RSE (if present)
if (node->nod_flags & nod_invariant)
{
if (csb->csb_current_nodes.hasData())
{
LegacyNodeOrRseNode& topRseNode = csb->csb_current_nodes[0];
fb_assert(topRseNode.rseNode);
if (!topRseNode.rseNode->rse_invariants)
{
topRseNode.rseNode->rse_invariants =
FB_NEW(*tdbb->getDefaultPool()) VarInvariantArray(*tdbb->getDefaultPool());
}
topRseNode.rseNode->rse_invariants->add(node->nod_impure);
}
}
// finish up processing of record selection expressions
if (rse_node)

View File

@ -137,8 +137,10 @@ dsc* EVL_assign_to(thread_db* tdbb, const jrd_nod* node)
DEV_BLKCHK(node, type_nod);
const ExprNode* exprNode = node->asExpr();
jrd_req* request = tdbb->getRequest();
impure_value* impure = request->getImpure<impure_value>(node->nod_impure);
impure_value* impure = request->getImpure<impure_value>(exprNode->impureOffset);
// The only nodes that can be assigned to are: argument, field and variable.
@ -316,32 +318,20 @@ dsc* EVL_expr(thread_db* tdbb, const jrd_nod* node)
if (--tdbb->tdbb_quantum < 0)
JRD_reschedule(tdbb, 0, true);
const ValueExprNode* exprNode = static_cast<const ValueExprNode*>(node->asExpr());
jrd_req* const request = tdbb->getRequest();
impure_value* const impure = request->getImpure<impure_value>(node->nod_impure);
impure_value* const impure = request->getImpure<impure_value>(exprNode->impureOffset);
request->req_flags &= ~req_null;
// Do a preliminary screen for either simple nodes or nodes that are special cased elsewhere
dsc* desc = exprNode->execute(tdbb, request);
switch (node->nod_type)
{
case nod_class_exprnode_jrd:
{
const ValueExprNode* exprNode = reinterpret_cast<const ValueExprNode*>(node->nod_arg[0]);
dsc* desc = exprNode->execute(tdbb, request);
if (desc)
request->req_flags &= ~req_null;
else
request->req_flags |= req_null;
if (desc)
request->req_flags &= ~req_null;
else
request->req_flags |= req_null;
return desc;
}
default:
BUGCHECK(232); // msg 232 EVL_expr: invalid operation
}
return NULL;
return desc;
}

View File

@ -96,8 +96,6 @@ public:
NestConst<jrd_nod> nod_parent;
ULONG nod_impure; // Inpure offset from request block
nod_t nod_type; // Type of node
USHORT nod_flags;
SCHAR nod_scale; // Target scale factor
USHORT nod_count; // Number of arguments
jrd_nod* nod_arg[1];
@ -114,13 +112,19 @@ public:
operator const jrd_nod* const* () const { return arg[0].getAddress(); }
} nod_arg;
***/
};
const int nod_quad = 1; // compute in quad (default is long)
const int nod_double = 2;
const int nod_date = 4;
const int nod_value = 8; // full value area required in impure space
const int nod_invariant = 16; // node is recognized as being invariant
const ExprNode* asExpr() const
{
fb_assert(nod_type == nod_class_exprnode_jrd);
return reinterpret_cast<const ExprNode*>(nod_arg[0]);
}
ExprNode* asExpr()
{
fb_assert(nod_type == nod_class_exprnode_jrd);
return reinterpret_cast<ExprNode*>(nod_arg[0]);
}
};
// Types of nulls placement for each column in sort order
const int rse_nulls_default = 0;
@ -540,31 +544,21 @@ struct ItemInfo
bool fullDomain;
};
struct LegacyNodeOrRseNode
struct RseOrExprNode
{
LegacyNodeOrRseNode(jrd_nod* aLegacyNode)
: legacyNode(aLegacyNode),
boolExprNode(NULL),
RseOrExprNode(ExprNode* aExprNode)
: exprNode(aExprNode),
rseNode(NULL)
{
}
LegacyNodeOrRseNode(BoolExprNode* aBoolExprNode)
: legacyNode(NULL),
boolExprNode(aBoolExprNode),
rseNode(NULL)
{
}
LegacyNodeOrRseNode(RseNode* aRseNode)
: legacyNode(NULL),
boolExprNode(NULL),
RseOrExprNode(RseNode* aRseNode)
: exprNode(NULL),
rseNode(aRseNode)
{
}
jrd_nod* legacyNode;
BoolExprNode* boolExprNode;
ExprNode* exprNode;
RseNode* rseNode;
};
@ -670,7 +664,7 @@ public:
Firebird::Array<const RecordSource*> csb_fors; // record sources
Firebird::Array<jrd_nod*> csb_exec_sta; // Array of exec_into nodes
Firebird::Array<ULONG*> csb_invariants; // stack of pointer to nodes invariant offsets
Firebird::Array<LegacyNodeOrRseNode> csb_current_nodes; // RseNode's and other invariant
Firebird::Array<RseOrExprNode> csb_current_nodes; // RseNode's and other invariant
// candidates within whose scope we are
USHORT csb_n_stream; // Next available stream
USHORT csb_msg_number; // Highest used message number

View File

@ -804,7 +804,7 @@ RecordSource* OPT_compile(thread_db* tdbb, CompilerScratch* csb, RseNode* rse,
// Deoptimize some conjuncts in advance
for (size_t iter = 0; iter < opt->opt_conjuncts.getCount(); iter++)
{
if (opt->opt_conjuncts[iter].opt_conjunct_node->flags & BoolExprNode::FLAG_DEOPTIMIZE)
if (opt->opt_conjuncts[iter].opt_conjunct_node->nodFlags & ExprNode::FLAG_DEOPTIMIZE)
{
// Fake an index match for them
opt->opt_conjuncts[iter].opt_conjunct_flags |= opt_conjunct_matched;
@ -1637,7 +1637,7 @@ static USHORT distribute_equalities(BoolExprNodeStack& org_stack, CompilerScratc
{
BoolExprNode* boolean = stack1.object();
if (boolean->flags & BoolExprNode::FLAG_DEOPTIMIZE)
if (boolean->nodFlags & ExprNode::FLAG_DEOPTIMIZE)
continue;
ComparativeBoolNode* cmpNode = boolean->as<ComparativeBoolNode>();
@ -3217,14 +3217,14 @@ static BoolExprNode* make_inference_node(CompilerScratch* csb, BoolExprNode* boo
// (2) invariantness of second argument of STARTING WITH or LIKE is solely
// determined by its dependency on any of the fields
// If provisions above change the line below will have to be modified
newCmpNode->flags = cmpNode->flags;
newCmpNode->nodFlags = cmpNode->nodFlags;
// Share impure area for cached invariant value used to hold pre-compiled
// pattern for new LIKE and CONTAINING algorithms.
// Proper cloning of impure area for this node would require careful accounting
// of new invariant dependencies - we avoid such hassles via using single
// cached pattern value for all node clones. This is faster too.
if (newCmpNode->flags & BoolExprNode::FLAG_INVARIANT)
if (newCmpNode->nodFlags & ExprNode::FLAG_INVARIANT)
newCmpNode->impureOffset = cmpNode->impureOffset;
// But substitute new values for some of the predicate arguments

View File

@ -321,7 +321,8 @@ AggregatedStream::State AggregatedStream::evaluateGroup(thread_db* tdbb, Aggrega
for (ptr = m_group->begin(), end = m_group->end(); ptr != end; ++ptr)
{
const jrd_nod* from = *ptr;
impure_value* impure = request->getImpure<impure_value>(from->nod_impure);
const ExprNode* fromExpr = from->asExpr();
impure_value* impure = request->getImpure<impure_value>(fromExpr->impureOffset);
desc = EVL_expr(tdbb, from);
@ -337,7 +338,8 @@ AggregatedStream::State AggregatedStream::evaluateGroup(thread_db* tdbb, Aggrega
for (ptr = m_order->begin(), end = m_order->end(); ptr != end; ++ptr)
{
const jrd_nod* from = *ptr;
impure_value* impure = request->getImpure<impure_value>(from->nod_impure);
const ExprNode* fromExpr = from->asExpr();
impure_value* impure = request->getImpure<impure_value>(fromExpr->impureOffset);
desc = EVL_expr(tdbb, from);
@ -368,7 +370,8 @@ AggregatedStream::State AggregatedStream::evaluateGroup(thread_db* tdbb, Aggrega
for (ptr = m_group->begin(), end = m_group->end(); ptr != end; ++ptr)
{
const jrd_nod* from = *ptr;
impure_value* impure = request->getImpure<impure_value>(from->nod_impure);
const ExprNode* fromExpr = from->asExpr();
impure_value* impure = request->getImpure<impure_value>(fromExpr->impureOffset);
if (impure->vlu_desc.dsc_address)
EVL_make_value(tdbb, &impure->vlu_desc, &vtemp);
@ -405,7 +408,8 @@ AggregatedStream::State AggregatedStream::evaluateGroup(thread_db* tdbb, Aggrega
for (ptr = m_order->begin(), end = m_order->end(); ptr != end; ++ptr)
{
const jrd_nod* from = *ptr;
impure_value* impure = request->getImpure<impure_value>(from->nod_impure);
const ExprNode* fromExpr = from->asExpr();
impure_value* impure = request->getImpure<impure_value>(fromExpr->impureOffset);
if (impure->vlu_desc.dsc_address)
EVL_make_value(tdbb, &impure->vlu_desc, &vtemp);

View File

@ -384,7 +384,7 @@ void TraceProcedureImpl::JrdParamsImpl::fillParams()
if ((param = ExprNode::as<ParameterNode>(prm)))
{
//const impure_value* impure = request->getImpure<impure_value>(prm->nod_impure)
//const impure_value* impure = request->getImpure<impure_value>(param->impureOffset)
const jrd_nod* message = param->message;
const Format* format = (Format*) message->nod_arg[e_msg_format];
const int arg_number = param->argNumber;
@ -405,7 +405,7 @@ void TraceProcedureImpl::JrdParamsImpl::fillParams()
else if ((var = ExprNode::as<VariableNode>(prm)))
{
impure_value* impure = const_cast<jrd_req*>(m_request)->getImpure<impure_value>(
prm->nod_impure);
var->impureOffset);
from_desc = &impure->vlu_desc;
}
else if ((literal = ExprNode::as<LiteralNode>(prm)))