From 77bae396df3f6f883f58f1877b7c08eb3ebb6b63 Mon Sep 17 00:00:00 2001 From: David Rowley Date: Mon, 4 Apr 2022 22:24:59 +1200 Subject: [PATCH] Adjust tuplesort API to have bitwise option flags This replaces the bool flag for randomAccess. An upcoming patch requires adding another option, so instead of breaking the API for that, then breaking it again one day if we add more options, let's just break it once. Any boolean options we add in the future will just make use of an unused bit in the flags. Any extensions making use of tuplesorts will need to update their code to pass TUPLESORT_RANDOMACCESS instead of true for randomAccess. TUPLESORT_NONE can be used for a set of empty options. Author: David Rowley Reviewed-by: Justin Pryzby Discussion: https://postgr.es/m/CAApHDvoH4ASzsAOyHcxkuY01Qf%2B%2B8JJ0paw%2B03dk%2BW25tQEcNQ%40mail.gmail.com --- src/backend/access/gist/gistbuild.c | 2 +- src/backend/access/hash/hashsort.c | 2 +- src/backend/access/heap/heapam_handler.c | 2 +- src/backend/access/nbtree/nbtsort.c | 6 +- src/backend/catalog/index.c | 2 +- src/backend/executor/nodeAgg.c | 6 +- src/backend/executor/nodeIncrementalSort.c | 4 +- src/backend/executor/nodeSort.c | 8 +- src/backend/utils/adt/orderedsetaggs.c | 10 +- src/backend/utils/sort/tuplesort.c | 111 +++++++++++---------- src/include/utils/tuplesort.h | 19 ++-- 11 files changed, 99 insertions(+), 73 deletions(-) diff --git a/src/backend/access/gist/gistbuild.c b/src/backend/access/gist/gistbuild.c index e081e6571a4..f5a5caff8ec 100644 --- a/src/backend/access/gist/gistbuild.c +++ b/src/backend/access/gist/gistbuild.c @@ -271,7 +271,7 @@ gistbuild(Relation heap, Relation index, IndexInfo *indexInfo) index, maintenance_work_mem, NULL, - false); + TUPLESORT_NONE); /* Scan the table, adding all tuples to the tuplesort */ reltuples = table_index_build_scan(heap, index, indexInfo, true, true, diff --git a/src/backend/access/hash/hashsort.c b/src/backend/access/hash/hashsort.c index 6d8512283a8..aa61e39f26a 100644 --- a/src/backend/access/hash/hashsort.c +++ b/src/backend/access/hash/hashsort.c @@ -86,7 +86,7 @@ _h_spoolinit(Relation heap, Relation index, uint32 num_buckets) hspool->max_buckets, maintenance_work_mem, NULL, - false); + TUPLESORT_NONE); return hspool; } diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c index dee264e8596..3a9532cb4f7 100644 --- a/src/backend/access/heap/heapam_handler.c +++ b/src/backend/access/heap/heapam_handler.c @@ -726,7 +726,7 @@ heapam_relation_copy_for_cluster(Relation OldHeap, Relation NewHeap, if (use_sort) tuplesort = tuplesort_begin_cluster(oldTupDesc, OldIndex, maintenance_work_mem, - NULL, false); + NULL, TUPLESORT_NONE); else tuplesort = NULL; diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c index c074513efa1..9f60fa9894b 100644 --- a/src/backend/access/nbtree/nbtsort.c +++ b/src/backend/access/nbtree/nbtsort.c @@ -436,7 +436,7 @@ _bt_spools_heapscan(Relation heap, Relation index, BTBuildState *buildstate, tuplesort_begin_index_btree(heap, index, buildstate->isunique, buildstate->nulls_not_distinct, maintenance_work_mem, coordinate, - false); + TUPLESORT_NONE); /* * If building a unique index, put dead tuples in a second spool to keep @@ -475,7 +475,7 @@ _bt_spools_heapscan(Relation heap, Relation index, BTBuildState *buildstate, */ buildstate->spool2->sortstate = tuplesort_begin_index_btree(heap, index, false, false, work_mem, - coordinate2, false); + coordinate2, TUPLESORT_NONE); } /* Fill spool using 
either serial or parallel heap scan */ @@ -1939,7 +1939,7 @@ _bt_parallel_scan_and_sort(BTSpool *btspool, BTSpool *btspool2, btspool->isunique, btspool->nulls_not_distinct, sortmem, coordinate, - false); + TUPLESORT_NONE); /* * Just as with serial case, there may be a second spool. If so, a diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c index dd715ca0609..55800c94786 100644 --- a/src/backend/catalog/index.c +++ b/src/backend/catalog/index.c @@ -3364,7 +3364,7 @@ validate_index(Oid heapId, Oid indexId, Snapshot snapshot) state.tuplesort = tuplesort_begin_datum(INT8OID, Int8LessOperator, InvalidOid, false, maintenance_work_mem, - NULL, false); + NULL, TUPLESORT_NONE); state.htups = state.itups = state.tups_inserted = 0; /* ambulkdelete updates progress metrics */ diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c index 08cf569d8fa..23030a32a59 100644 --- a/src/backend/executor/nodeAgg.c +++ b/src/backend/executor/nodeAgg.c @@ -530,7 +530,7 @@ initialize_phase(AggState *aggstate, int newphase) sortnode->collations, sortnode->nullsFirst, work_mem, - NULL, false); + NULL, TUPLESORT_NONE); } aggstate->current_phase = newphase; @@ -607,7 +607,7 @@ initialize_aggregate(AggState *aggstate, AggStatePerTrans pertrans, pertrans->sortOperators[0], pertrans->sortCollations[0], pertrans->sortNullsFirst[0], - work_mem, NULL, false); + work_mem, NULL, TUPLESORT_NONE); } else pertrans->sortstates[aggstate->current_set] = @@ -617,7 +617,7 @@ initialize_aggregate(AggState *aggstate, AggStatePerTrans pertrans, pertrans->sortOperators, pertrans->sortCollations, pertrans->sortNullsFirst, - work_mem, NULL, false); + work_mem, NULL, TUPLESORT_NONE); } /* diff --git a/src/backend/executor/nodeIncrementalSort.c b/src/backend/executor/nodeIncrementalSort.c index d6fb56dec73..4f50bc845da 100644 --- a/src/backend/executor/nodeIncrementalSort.c +++ b/src/backend/executor/nodeIncrementalSort.c @@ -315,7 +315,7 @@ switchToPresortedPrefixMode(PlanState *pstate) &(plannode->sort.nullsFirst[nPresortedCols]), work_mem, NULL, - false); + TUPLESORT_NONE); node->prefixsort_state = prefixsort_state; } else @@ -616,7 +616,7 @@ ExecIncrementalSort(PlanState *pstate) plannode->sort.nullsFirst, work_mem, NULL, - false); + TUPLESORT_NONE); node->fullsort_state = fullsort_state; } else diff --git a/src/backend/executor/nodeSort.c b/src/backend/executor/nodeSort.c index 9481a622bf5..a113d737955 100644 --- a/src/backend/executor/nodeSort.c +++ b/src/backend/executor/nodeSort.c @@ -77,6 +77,7 @@ ExecSort(PlanState *pstate) Sort *plannode = (Sort *) node->ss.ps.plan; PlanState *outerNode; TupleDesc tupDesc; + int tuplesortopts = TUPLESORT_NONE; SO1_printf("ExecSort: %s\n", "sorting subplan"); @@ -96,6 +97,9 @@ ExecSort(PlanState *pstate) outerNode = outerPlanState(node); tupDesc = ExecGetResultType(outerNode); + if (node->randomAccess) + tuplesortopts |= TUPLESORT_RANDOMACCESS; + if (node->datumSort) tuplesortstate = tuplesort_begin_datum(TupleDescAttr(tupDesc, 0)->atttypid, plannode->sortOperators[0], @@ -103,7 +107,7 @@ ExecSort(PlanState *pstate) plannode->nullsFirst[0], work_mem, NULL, - node->randomAccess); + tuplesortopts); else tuplesortstate = tuplesort_begin_heap(tupDesc, plannode->numCols, @@ -113,7 +117,7 @@ ExecSort(PlanState *pstate) plannode->nullsFirst, work_mem, NULL, - node->randomAccess); + tuplesortopts); if (node->bounded) tuplesort_set_bound(tuplesortstate, node->bound); node->tuplesortstate = (void *) tuplesortstate; diff --git a/src/backend/utils/adt/orderedsetaggs.c 
b/src/backend/utils/adt/orderedsetaggs.c index 96dae6ec4a8..6d4f6b7dca2 100644 --- a/src/backend/utils/adt/orderedsetaggs.c +++ b/src/backend/utils/adt/orderedsetaggs.c @@ -118,6 +118,7 @@ ordered_set_startup(FunctionCallInfo fcinfo, bool use_tuples) OSAPerQueryState *qstate; MemoryContext gcontext; MemoryContext oldcontext; + int tuplesortopt; /* * Check we're called as aggregate (and not a window function), and get @@ -283,6 +284,11 @@ ordered_set_startup(FunctionCallInfo fcinfo, bool use_tuples) osastate->qstate = qstate; osastate->gcontext = gcontext; + tuplesortopt = TUPLESORT_NONE; + + if (qstate->rescan_needed) + tuplesortopt |= TUPLESORT_RANDOMACCESS; + /* * Initialize tuplesort object. */ @@ -295,7 +301,7 @@ ordered_set_startup(FunctionCallInfo fcinfo, bool use_tuples) qstate->sortNullsFirsts, work_mem, NULL, - qstate->rescan_needed); + tuplesortopt); else osastate->sortstate = tuplesort_begin_datum(qstate->sortColType, qstate->sortOperator, @@ -303,7 +309,7 @@ ordered_set_startup(FunctionCallInfo fcinfo, bool use_tuples) qstate->sortNullsFirst, work_mem, NULL, - qstate->rescan_needed); + tuplesortopt); osastate->number_of_rows = 0; osastate->sort_done = false; diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c index 10676299dc8..a8a5cc52047 100644 --- a/src/backend/utils/sort/tuplesort.c +++ b/src/backend/utils/sort/tuplesort.c @@ -246,7 +246,7 @@ struct Tuplesortstate { TupSortStatus status; /* enumerated value as shown above */ int nKeys; /* number of columns in sort key */ - bool randomAccess; /* did caller request random access? */ + int sortopt; /* Bitmask of flags used to setup sort */ bool bounded; /* did caller specify a maximum number of * tuples to return? */ bool boundUsed; /* true if we made use of a bounded heap */ @@ -564,12 +564,12 @@ struct Sharedsort * may or may not match the in-memory representation of the tuple --- * any conversion needed is the job of the writetup and readtup routines. * - * If state->randomAccess is true, then the stored representation of the - * tuple must be followed by another "unsigned int" that is a copy of the - * length --- so the total tape space used is actually sizeof(unsigned int) - * more than the stored length value. This allows read-backwards. When - * randomAccess is not true, the write/read routines may omit the extra - * length word. + * If state->sortopt contains TUPLESORT_RANDOMACCESS, then the stored + * representation of the tuple must be followed by another "unsigned int" that + * is a copy of the length --- so the total tape space used is actually + * sizeof(unsigned int) more than the stored length value. This allows + * read-backwards. When the random access flag was not specified, the + * write/read routines may omit the extra length word. * * writetup is expected to write both length words as well as the tuple * data. When readtup is called, the tape is positioned just after the @@ -614,7 +614,7 @@ struct Sharedsort static Tuplesortstate *tuplesort_begin_common(int workMem, SortCoordinate coordinate, - bool randomAccess); + int sortopt); static void tuplesort_begin_batch(Tuplesortstate *state); static void puttuple_common(Tuplesortstate *state, SortTuple *tuple); static bool consider_abort_common(Tuplesortstate *state); @@ -806,21 +806,20 @@ qsort_tuple_int32_compare(SortTuple *a, SortTuple *b, Tuplesortstate *state) * Each variant of tuplesort_begin has a workMem parameter specifying the * maximum number of kilobytes of RAM to use before spilling data to disk. 
* (The normal value of this parameter is work_mem, but some callers use - * other values.) Each variant also has a randomAccess parameter specifying - * whether the caller needs non-sequential access to the sort result. + * other values.) Each variant also has a sortopt which is a bitmask of + * sort options. See TUPLESORT_* definitions in tuplesort.h */ static Tuplesortstate * -tuplesort_begin_common(int workMem, SortCoordinate coordinate, - bool randomAccess) +tuplesort_begin_common(int workMem, SortCoordinate coordinate, int sortopt) { Tuplesortstate *state; MemoryContext maincontext; MemoryContext sortcontext; MemoryContext oldcontext; - /* See leader_takeover_tapes() remarks on randomAccess support */ - if (coordinate && randomAccess) + /* See leader_takeover_tapes() remarks on random access support */ + if (coordinate && (sortopt & TUPLESORT_RANDOMACCESS)) elog(ERROR, "random access disallowed under parallel sort"); /* @@ -857,7 +856,7 @@ tuplesort_begin_common(int workMem, SortCoordinate coordinate, pg_rusage_init(&state->ru_start); #endif - state->randomAccess = randomAccess; + state->sortopt = sortopt; state->tuples = true; /* @@ -991,10 +990,10 @@ tuplesort_begin_heap(TupleDesc tupDesc, int nkeys, AttrNumber *attNums, Oid *sortOperators, Oid *sortCollations, bool *nullsFirstFlags, - int workMem, SortCoordinate coordinate, bool randomAccess) + int workMem, SortCoordinate coordinate, int sortopt) { Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate, - randomAccess); + sortopt); MemoryContext oldcontext; int i; @@ -1006,7 +1005,7 @@ tuplesort_begin_heap(TupleDesc tupDesc, if (trace_sort) elog(LOG, "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c", - nkeys, workMem, randomAccess ? 't' : 'f'); + nkeys, workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f'); #endif state->nKeys = nkeys; @@ -1015,7 +1014,7 @@ tuplesort_begin_heap(TupleDesc tupDesc, false, /* no unique check */ nkeys, workMem, - randomAccess, + sortopt & TUPLESORT_RANDOMACCESS, PARALLEL_SORT(state)); state->comparetup = comparetup_heap; @@ -1065,10 +1064,10 @@ Tuplesortstate * tuplesort_begin_cluster(TupleDesc tupDesc, Relation indexRel, int workMem, - SortCoordinate coordinate, bool randomAccess) + SortCoordinate coordinate, int sortopt) { Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate, - randomAccess); + sortopt); BTScanInsert indexScanKey; MemoryContext oldcontext; int i; @@ -1082,7 +1081,7 @@ tuplesort_begin_cluster(TupleDesc tupDesc, elog(LOG, "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c", RelationGetNumberOfAttributes(indexRel), - workMem, randomAccess ? 't' : 'f'); + workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f'); #endif state->nKeys = IndexRelationGetNumberOfKeyAttributes(indexRel); @@ -1091,7 +1090,7 @@ tuplesort_begin_cluster(TupleDesc tupDesc, false, /* no unique check */ state->nKeys, workMem, - randomAccess, + sortopt & TUPLESORT_RANDOMACCESS, PARALLEL_SORT(state)); state->comparetup = comparetup_cluster; @@ -1172,10 +1171,10 @@ tuplesort_begin_index_btree(Relation heapRel, bool uniqueNullsNotDistinct, int workMem, SortCoordinate coordinate, - bool randomAccess) + int sortopt) { Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate, - randomAccess); + sortopt); BTScanInsert indexScanKey; MemoryContext oldcontext; int i; @@ -1187,7 +1186,7 @@ tuplesort_begin_index_btree(Relation heapRel, elog(LOG, "begin index sort: unique = %c, workMem = %d, randomAccess = %c", enforceUnique ? 't' : 'f', - workMem, randomAccess ? 
't' : 'f'); + workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f'); #endif state->nKeys = IndexRelationGetNumberOfKeyAttributes(indexRel); @@ -1196,7 +1195,7 @@ tuplesort_begin_index_btree(Relation heapRel, enforceUnique, state->nKeys, workMem, - randomAccess, + sortopt & TUPLESORT_RANDOMACCESS, PARALLEL_SORT(state)); state->comparetup = comparetup_index_btree; @@ -1254,10 +1253,10 @@ tuplesort_begin_index_hash(Relation heapRel, uint32 max_buckets, int workMem, SortCoordinate coordinate, - bool randomAccess) + int sortopt) { Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate, - randomAccess); + sortopt); MemoryContext oldcontext; oldcontext = MemoryContextSwitchTo(state->maincontext); @@ -1270,7 +1269,8 @@ tuplesort_begin_index_hash(Relation heapRel, high_mask, low_mask, max_buckets, - workMem, randomAccess ? 't' : 'f'); + workMem, + sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f'); #endif state->nKeys = 1; /* Only one sort column, the hash code */ @@ -1298,10 +1298,10 @@ tuplesort_begin_index_gist(Relation heapRel, Relation indexRel, int workMem, SortCoordinate coordinate, - bool randomAccess) + int sortopt) { Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate, - randomAccess); + sortopt); MemoryContext oldcontext; int i; @@ -1311,7 +1311,7 @@ tuplesort_begin_index_gist(Relation heapRel, if (trace_sort) elog(LOG, "begin index sort: workMem = %d, randomAccess = %c", - workMem, randomAccess ? 't' : 'f'); + workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f'); #endif state->nKeys = IndexRelationGetNumberOfKeyAttributes(indexRel); @@ -1354,10 +1354,10 @@ tuplesort_begin_index_gist(Relation heapRel, Tuplesortstate * tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation, bool nullsFirstFlag, int workMem, - SortCoordinate coordinate, bool randomAccess) + SortCoordinate coordinate, int sortopt) { Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate, - randomAccess); + sortopt); MemoryContext oldcontext; int16 typlen; bool typbyval; @@ -1368,7 +1368,7 @@ tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation, if (trace_sort) elog(LOG, "begin datum sort: workMem = %d, randomAccess = %c", - workMem, randomAccess ? 't' : 'f'); + workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f'); #endif state->nKeys = 1; /* always a one-column sort */ @@ -1377,7 +1377,7 @@ tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation, false, /* no unique check */ 1, workMem, - randomAccess, + sortopt & TUPLESORT_RANDOMACCESS, PARALLEL_SORT(state)); state->comparetup = comparetup_datum; @@ -2272,7 +2272,7 @@ tuplesort_gettuple_common(Tuplesortstate *state, bool forward, switch (state->status) { case TSS_SORTEDINMEM: - Assert(forward || state->randomAccess); + Assert(forward || state->sortopt & TUPLESORT_RANDOMACCESS); Assert(!state->slabAllocatorUsed); if (forward) { @@ -2316,7 +2316,7 @@ tuplesort_gettuple_common(Tuplesortstate *state, bool forward, break; case TSS_SORTEDONTAPE: - Assert(forward || state->randomAccess); + Assert(forward || state->sortopt & TUPLESORT_RANDOMACCESS); Assert(state->slabAllocatorUsed); /* @@ -3091,7 +3091,8 @@ mergeruns(Tuplesortstate *state) * sorted tape, we can stop at this point and do the final merge * on-the-fly. 
*/ - if (!state->randomAccess && state->nInputRuns <= state->nInputTapes + if ((state->sortopt & TUPLESORT_RANDOMACCESS) == 0 + && state->nInputRuns <= state->nInputTapes && !WORKER(state)) { /* Tell logtape.c we won't be writing anymore */ @@ -3337,7 +3338,7 @@ tuplesort_rescan(Tuplesortstate *state) { MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); - Assert(state->randomAccess); + Assert(state->sortopt & TUPLESORT_RANDOMACCESS); switch (state->status) { @@ -3370,7 +3371,7 @@ tuplesort_markpos(Tuplesortstate *state) { MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); - Assert(state->randomAccess); + Assert(state->sortopt & TUPLESORT_RANDOMACCESS); switch (state->status) { @@ -3401,7 +3402,7 @@ tuplesort_restorepos(Tuplesortstate *state) { MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); - Assert(state->randomAccess); + Assert(state->sortopt & TUPLESORT_RANDOMACCESS); switch (state->status) { @@ -3998,7 +3999,8 @@ writetup_heap(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup) LogicalTapeWrite(tape, (void *) &tuplen, sizeof(tuplen)); LogicalTapeWrite(tape, (void *) tupbody, tupbodylen); - if (state->randomAccess) /* need trailing length word? */ + if (state->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length + * word? */ LogicalTapeWrite(tape, (void *) &tuplen, sizeof(tuplen)); if (!state->slabAllocatorUsed) @@ -4021,7 +4023,8 @@ readtup_heap(Tuplesortstate *state, SortTuple *stup, /* read in the tuple proper */ tuple->t_len = tuplen; LogicalTapeReadExact(tape, tupbody, tupbodylen); - if (state->randomAccess) /* need trailing length word? */ + if (state->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length + * word? */ LogicalTapeReadExact(tape, &tuplen, sizeof(tuplen)); stup->tuple = (void *) tuple; /* set up first-column key value */ @@ -4233,7 +4236,8 @@ writetup_cluster(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup) LogicalTapeWrite(tape, &tuplen, sizeof(tuplen)); LogicalTapeWrite(tape, &tuple->t_self, sizeof(ItemPointerData)); LogicalTapeWrite(tape, tuple->t_data, tuple->t_len); - if (state->randomAccess) /* need trailing length word? */ + if (state->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length + * word? */ LogicalTapeWrite(tape, &tuplen, sizeof(tuplen)); if (!state->slabAllocatorUsed) @@ -4259,7 +4263,8 @@ readtup_cluster(Tuplesortstate *state, SortTuple *stup, tuple->t_tableOid = InvalidOid; /* Read in the tuple body */ LogicalTapeReadExact(tape, tuple->t_data, tuple->t_len); - if (state->randomAccess) /* need trailing length word? */ + if (state->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length + * word? */ LogicalTapeReadExact(tape, &tuplen, sizeof(tuplen)); stup->tuple = (void *) tuple; /* set up first-column key value, if it's a simple column */ @@ -4483,7 +4488,8 @@ writetup_index(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup) tuplen = IndexTupleSize(tuple) + sizeof(tuplen); LogicalTapeWrite(tape, (void *) &tuplen, sizeof(tuplen)); LogicalTapeWrite(tape, (void *) tuple, IndexTupleSize(tuple)); - if (state->randomAccess) /* need trailing length word? */ + if (state->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length + * word? 
*/ LogicalTapeWrite(tape, (void *) &tuplen, sizeof(tuplen)); if (!state->slabAllocatorUsed) @@ -4501,7 +4507,8 @@ readtup_index(Tuplesortstate *state, SortTuple *stup, IndexTuple tuple = (IndexTuple) readtup_alloc(state, tuplen); LogicalTapeReadExact(tape, tuple, tuplen); - if (state->randomAccess) /* need trailing length word? */ + if (state->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length + * word? */ LogicalTapeReadExact(tape, &tuplen, sizeof(tuplen)); stup->tuple = (void *) tuple; /* set up first-column key value */ @@ -4571,7 +4578,8 @@ writetup_datum(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup) LogicalTapeWrite(tape, (void *) &writtenlen, sizeof(writtenlen)); LogicalTapeWrite(tape, waddr, tuplen); - if (state->randomAccess) /* need trailing length word? */ + if (state->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length + * word? */ LogicalTapeWrite(tape, (void *) &writtenlen, sizeof(writtenlen)); if (!state->slabAllocatorUsed && stup->tuple) @@ -4611,7 +4619,8 @@ readtup_datum(Tuplesortstate *state, SortTuple *stup, stup->tuple = raddr; } - if (state->randomAccess) /* need trailing length word? */ + if (state->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length + * word? */ LogicalTapeReadExact(tape, &tuplen, sizeof(tuplen)); } diff --git a/src/include/utils/tuplesort.h b/src/include/utils/tuplesort.h index da5ba591989..345f4ce8024 100644 --- a/src/include/utils/tuplesort.h +++ b/src/include/utils/tuplesort.h @@ -86,6 +86,12 @@ typedef enum SORT_SPACE_TYPE_MEMORY } TuplesortSpaceType; +/* Bitwise option flags for tuple sorts */ +#define TUPLESORT_NONE 0 + +/* specifies whether non-sequential access to the sort result is required */ +#define TUPLESORT_RANDOMACCESS (1 << 0) + typedef struct TuplesortInstrumentation { TuplesortMethod sortMethod; /* sort algorithm used */ @@ -201,32 +207,33 @@ extern Tuplesortstate *tuplesort_begin_heap(TupleDesc tupDesc, Oid *sortOperators, Oid *sortCollations, bool *nullsFirstFlags, int workMem, SortCoordinate coordinate, - bool randomAccess); + int sortopt); extern Tuplesortstate *tuplesort_begin_cluster(TupleDesc tupDesc, Relation indexRel, int workMem, - SortCoordinate coordinate, bool randomAccess); + SortCoordinate coordinate, + int sortopt); extern Tuplesortstate *tuplesort_begin_index_btree(Relation heapRel, Relation indexRel, bool enforceUnique, bool uniqueNullsNotDistinct, int workMem, SortCoordinate coordinate, - bool randomAccess); + int sortopt); extern Tuplesortstate *tuplesort_begin_index_hash(Relation heapRel, Relation indexRel, uint32 high_mask, uint32 low_mask, uint32 max_buckets, int workMem, SortCoordinate coordinate, - bool randomAccess); + int sortopt); extern Tuplesortstate *tuplesort_begin_index_gist(Relation heapRel, Relation indexRel, int workMem, SortCoordinate coordinate, - bool randomAccess); + int sortopt); extern Tuplesortstate *tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation, bool nullsFirstFlag, int workMem, SortCoordinate coordinate, - bool randomAccess); + int sortopt); extern void tuplesort_set_bound(Tuplesortstate *state, int64 bound); extern bool tuplesort_used_bound(Tuplesortstate *state); -- 2.39.5
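
A note for extension authors (not part of the patch itself): the required update is mechanical. Wherever a tuplesort_begin_*() call previously passed "true" for randomAccess, pass TUPLESORT_RANDOMACCESS; wherever it passed "false", pass TUPLESORT_NONE. The sketch below follows the same pattern the patch applies in nodeSort.c and orderedsetaggs.c; the variable names here (need_rescan, sortstate, and the sort-key arrays) are hypothetical stand-ins for whatever the calling code already has.

    int         sortopt = TUPLESORT_NONE;

    /* Request non-sequential access to the sort result only when needed. */
    if (need_rescan)
        sortopt |= TUPLESORT_RANDOMACCESS;

    /* Before this patch, the final argument was the bool "need_rescan". */
    sortstate = tuplesort_begin_heap(tupDesc, nkeys, attNums,
                                     sortOperators, sortCollations,
                                     nullsFirst, work_mem, NULL,
                                     sortopt);

Because sortopt is a bitmask, any boolean options added later can simply be OR'd into the same argument without another signature change.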