* with heap relation locked first to prevent deadlocking). In hot
* standby mode this will raise an error when parentcheck is true.
*
- * There is no need for the usual indcheckxmin usability horizon test here,
- * even in the heapallindexed case, because index undergoing verification
- * only needs to have entries for a new transaction snapshot. (If this is
- * a parentcheck verification, there is no question about committed or
- * recently dead heap tuples lacking index entries due to concurrent
- * activity.)
+ * There is no need for the usual indcheckxmin usability horizon test
+ * here, even in the heapallindexed case, because the index undergoing
+ * verification only needs to have entries for a new transaction snapshot.
+ * (If this is a parentcheck verification, there is no question about
+ * committed or recently dead heap tuples lacking index entries due to
+ * concurrent activity.)
*/
indrel = index_open(indrelid, lockmode);
* index fingerprinting should have reached all tuples returned by
* IndexBuildHeapScan().
*
- * In readonly case, we also check for problems with missing downlinks.
- * A second Bloom filter is used for this.
+ * In readonly case, we also check for problems with missing
+ * downlinks. A second Bloom filter is used for this.
*/
if (!state->readonly)
{
* READ COMMITTED mode. A new snapshot is guaranteed to have all
* the entries it requires in the index.
*
- * We must defend against the possibility that an old xact snapshot
- * was returned at higher isolation levels when that snapshot is
- * not safe for index scans of the target index. This is possible
- * when the snapshot sees tuples that are before the index's
- * indcheckxmin horizon. Throwing an error here should be very
- * rare. It doesn't seem worth using a secondary snapshot to avoid
- * this.
+ * We must defend against the possibility that an old xact
+ * snapshot was returned at higher isolation levels when that
+ * snapshot is not safe for index scans of the target index. This
+ * is possible when the snapshot sees tuples that are before the
+ * index's indcheckxmin horizon. Throwing an error here should be
+ * very rare. It doesn't seem worth using a secondary snapshot to
+ * avoid this.
*/
if (IsolationUsesXactSnapshot() && rel->rd_index->indcheckxmin &&
!TransactionIdPrecedes(HeapTupleHeaderGetXmin(rel->rd_indextuple->t_data),
}
else
{
- int64 total_pages;
+ int64 total_pages;
/*
* Extra readonly downlink check.
*
- * In readonly case, we know that there cannot be a concurrent page
- * split or a concurrent page deletion, which gives us the
+ * In readonly case, we know that there cannot be a concurrent
+ * page split or a concurrent page deletion, which gives us the
* opportunity to verify that every non-ignorable page had a
* downlink one level up. We must be tolerant of interrupted page
* splits and page deletions, though. This is taken care of in
}
/*
- * Create our own scan for IndexBuildHeapScan(), rather than getting it
- * to do so for us. This is required so that we can actually use the
- * MVCC snapshot registered earlier in !readonly case.
+ * Create our own scan for IndexBuildHeapScan(), rather than getting
+ * it to do so for us. This is required so that we can actually use
+ * the MVCC snapshot registered earlier in !readonly case.
*
* Note that IndexBuildHeapScan() calls heap_endscan() for us.
*/
{
/*
* Since there cannot be a concurrent VACUUM operation in readonly
- * mode, and since a page has no links within other pages (siblings
- * and parent) once it is marked fully deleted, it should be
- * impossible to land on a fully deleted page in readonly mode.
- * See bt_downlink_check() for further details.
+ * mode, and since a page has no links within other pages
+ * (siblings and parent) once it is marked fully deleted, it
+ * should be impossible to land on a fully deleted page in
+ * readonly mode. See bt_downlink_check() for further details.
*
* The bt_downlink_check() P_ISDELETED() check is repeated here so
* that pages that are only reachable through sibling links get
P_ISLEAF(topaque) ? "leaf" : "internal", state->targetblock);
/*
- * Check the number of attributes in high key. Note, rightmost page doesn't
- * contain a high key, so nothing to check
+ * Check the number of attributes in high key. Note, rightmost page
+ * doesn't contain a high key, so nothing to check
*/
if (!P_RIGHTMOST(topaque) &&
!_bt_check_natts(state->rel, state->target, P_HIKEY))
/*
* lp_len should match the IndexTuple reported length exactly, since
- * lp_len is completely redundant in indexes, and both sources of tuple
- * length are MAXALIGN()'d. nbtree does not use lp_len all that
+ * lp_len is completely redundant in indexes, and both sources of
+ * tuple length are MAXALIGN()'d. nbtree does not use lp_len all that
* frequently, and is surprisingly tolerant of corrupt lp_len fields.
*/
if (tupsize != ItemIdGetLength(itemid))
static void
bt_downlink_missing_check(BtreeCheckState *state)
{
- BTPageOpaque topaque = (BTPageOpaque) PageGetSpecialPointer(state->target);
- ItemId itemid;
- IndexTuple itup;
- Page child;
- BTPageOpaque copaque;
- uint32 level;
- BlockNumber childblk;
+ BTPageOpaque topaque = (BTPageOpaque) PageGetSpecialPointer(state->target);
+ ItemId itemid;
+ IndexTuple itup;
+ Page child;
+ BTPageOpaque copaque;
+ uint32 level;
+ BlockNumber childblk;
Assert(state->heapallindexed && state->readonly);
Assert(!P_IGNORE(topaque));
* page split in passing, when it notices that the left sibling page is
* P_INCOMPLETE_SPLIT().
*
- * In general, VACUUM is not prepared for there to be no downlink to a page
- * that it deletes. This is the main reason why the lack of a downlink can
- * be reported as corruption here. It's not obvious that an invalid
- * missing downlink can result in wrong answers to queries, though, since
- * index scans that land on the child may end up consistently moving right.
- * The handling of concurrent page splits (and page deletions) within
- * _bt_moveright() cannot distinguish inconsistencies that last for a
- * moment from inconsistencies that are permanent and irrecoverable.
+ * In general, VACUUM is not prepared for there to be no downlink to a
+ * page that it deletes. This is the main reason why the lack of a
+ * downlink can be reported as corruption here. It's not obvious that an
+ * invalid missing downlink can result in wrong answers to queries,
+ * though, since index scans that land on the child may end up
+ * consistently moving right. The handling of concurrent page splits (and
+ * page deletions) within _bt_moveright() cannot distinguish
+ * inconsistencies that last for a moment from inconsistencies that are
+ * permanent and irrecoverable.
*
* VACUUM isn't even prepared to delete pages that have no downlink due to
* an incomplete page split, but it can detect and reason about that case
/*
* Target is probably the "top parent" of a multi-level page deletion.
- * We'll need to descend the subtree to make sure that descendant pages are
- * consistent with that, though.
+ * We'll need to descend the subtree to make sure that descendant pages
+ * are consistent with that, though.
*
* If the target page (which must be non-ignorable) is a leaf page, then
* clearly it can't be the top parent. The lack of a downlink is probably
* bt_downlink_check() does not visit pages reachable through negative
* infinity items. Besides, bt_downlink_check() is unwilling to descend
* multiple levels. (The similar bt_downlink_check() P_ISDELETED() check
- * within bt_check_level_from_leftmost() won't reach the page either, since
- * the leaf's live siblings should have their sibling links updated to
- * bypass the deletion target page when it is marked fully dead.)
+ * within bt_check_level_from_leftmost() won't reach the page either,
+ * since the leaf's live siblings should have their sibling links updated
+ * to bypass the deletion target page when it is marked fully dead.)
*
* If this error is raised, it might be due to a previous multi-level page
- * deletion that failed to realize that it wasn't yet safe to mark the leaf
- * page as fully dead. A "dangling downlink" will still remain when this
- * happens. The fact that the dangling downlink's page (the leaf's
+ * deletion that failed to realize that it wasn't yet safe to mark the
+ * leaf page as fully dead. A "dangling downlink" will still remain when
+ * this happens. The fact that the dangling downlink's page (the leaf's
* parent/ancestor page) lacked a downlink is incidental.
*/
if (P_ISDELETED(copaque))
(uint32) state->targetlsn)));
/*
- * Iff leaf page is half-dead, its high key top parent link should point to
- * what VACUUM considered to be the top parent page at the instant it was
- * interrupted. Provided the high key link actually points to the target
- * page, the missing downlink we detected is consistent with there having
- * been an interrupted multi-level page deletion. This means that the
- * subtree with the target page at its root (a page deletion chain) is in a
- * consistent state, enabling VACUUM to resume deleting the entire chain
- * the next time it encounters the half-dead leaf page.
+ * Iff leaf page is half-dead, its high key top parent link should point
+ * to what VACUUM considered to be the top parent page at the instant it
+ * was interrupted. Provided the high key link actually points to the
+ * target page, the missing downlink we detected is consistent with there
+ * having been an interrupted multi-level page deletion. This means that
+ * the subtree with the target page at its root (a page deletion chain) is
+ * in a consistent state, enabling VACUUM to resume deleting the entire
+ * chain the next time it encounters the half-dead leaf page.
*/
if (P_ISHALFDEAD(copaque) && !P_RIGHTMOST(copaque))
{
* are assumed immutable. While the LP_DEAD bit is mutable in leaf pages,
* that's ItemId metadata, which was not fingerprinted. (There will often
* be some dead-to-everyone IndexTuples fingerprinted by the Bloom filter,
- * but we only try to detect the absence of needed tuples, so that's okay.)
+ * but we only try to detect the absence of needed tuples, so that's
+ * okay.)
*
- * Note that we rely on deterministic index_form_tuple() TOAST compression.
- * If index_form_tuple() was ever enhanced to compress datums out-of-line,
- * or otherwise varied when or how compression was applied, our assumption
- * would break, leading to false positive reports of corruption. It's also
- * possible that non-pivot tuples could in the future have alternative
- * equivalent representations (e.g. by using the INDEX_ALT_TID_MASK bit).
- * For now, we don't decompress/normalize toasted values as part of
- * fingerprinting.
+ * Note that we rely on deterministic index_form_tuple() TOAST
+ * compression. If index_form_tuple() was ever enhanced to compress datums
+ * out-of-line, or otherwise varied when or how compression was applied,
+ * our assumption would break, leading to false positive reports of
+ * corruption. It's also possible that non-pivot tuples could in the
+ * future have alternative equivalent representations (e.g. by using the
+ * INDEX_ALT_TID_MASK bit). For now, we don't decompress/normalize toasted
+ * values as part of fingerprinting.
*/
itup = index_form_tuple(RelationGetDescr(index), values, isnull);
itup->t_tid = htup->t_self;
* Sanity checks for number of items on page.
*
* As noted at the beginning of _bt_binsrch(), an internal page must have
- * children, since there must always be a negative infinity downlink (there
- * may also be a highkey). In the case of non-rightmost leaf pages, there
- * must be at least a highkey.
+ * children, since there must always be a negative infinity downlink
+ * (there may also be a highkey). In the case of non-rightmost leaf
+ * pages, there must be at least a highkey.
*
- * This is correct when pages are half-dead, since internal pages are never
- * half-dead, and leaf pages must have a high key when half-dead (the
- * rightmost page can never be deleted). It's also correct with fully
- * deleted pages: _bt_unlink_halfdead_page() doesn't change anything about
- * the target page other than setting the page as fully dead, and setting
- * its xact field. In particular, it doesn't change the sibling links in
- * the deletion target itself, since they're required when index scans land
- * on the deletion target, and then need to move right (or need to move
- * left, in the case of backward index scans).
+ * This is correct when pages are half-dead, since internal pages are
+ * never half-dead, and leaf pages must have a high key when half-dead
+ * (the rightmost page can never be deleted). It's also correct with
+ * fully deleted pages: _bt_unlink_halfdead_page() doesn't change anything
+ * about the target page other than setting the page as fully dead, and
+ * setting its xact field. In particular, it doesn't change the sibling
+ * links in the deletion target itself, since they're required when index
+ * scans land on the deletion target, and then need to move right (or need
+ * to move left, in the case of backward index scans).
*/
maxoffset = PageGetMaxOffsetNumber(page);
if (maxoffset > MaxIndexTuplesPerPage)
static Datum
leftmostvalue_uuid(void)
{
- /* palloc0 will create the UUID with all zeroes: "00000000-0000-0000-0000-000000000000" */
- pg_uuid_t *retval = (pg_uuid_t *) palloc0(sizeof(pg_uuid_t));
+ /*
+ * palloc0 will create the UUID with all zeroes:
+ * "00000000-0000-0000-0000-000000000000"
+ */
+ pg_uuid_t *retval = (pg_uuid_t *) palloc0(sizeof(pg_uuid_t));
+
return UUIDPGetDatum(retval);
}
static Datum
leftmostvalue_name(void)
{
- NameData* result = (NameData *) palloc0(NAMEDATALEN);
+ NameData *result = (NameData *) palloc0(NAMEDATALEN);
+
return NameGetDatum(result);
}
if (coord <= 2 * DIM(cube))
{
/* dimension index */
- int index = (coord - 1) / 2;
+ int index = (coord - 1) / 2;
+
/* whether this is upper bound (lower bound otherwise) */
- bool upper = ((coord - 1) % 2 == 1);
+ bool upper = ((coord - 1) % 2 == 1);
if (IS_POINT(cube))
{
if (coord <= 2 * DIM(cube))
{
/* dimension index */
- int index = (coord - 1) / 2;
+ int index = (coord - 1) / 2;
+
/* whether this is upper bound (lower bound otherwise) */
- bool upper = ((coord - 1) % 2 == 1);
+ bool upper = ((coord - 1) % 2 == 1);
if (IS_POINT(cube))
{
else
{
/*
- * Return zero if coordinate is out of bound. That reproduces logic of
- * how cubes with low dimension number are expanded during GiST
+ * Return zero if coordinate is out of bound. That reproduces logic
+ * of how cubes with low dimension number are expanded during GiST
* indexing.
*/
result = 0.0;
static JsonbValue *SV_to_JsonbValue(SV *obj, JsonbParseState **ps, bool is_elem);
-static SV *
+static SV *
JsonbValue_to_SV(JsonbValue *jbv)
{
dTHX;
char *str = DatumGetCString(DirectFunctionCall1(numeric_out,
NumericGetDatum(jbv->val.numeric)));
SV *result = newSVnv(SvNV(cstr2sv(str)));
+
pfree(str);
return result;
}
char *str = pnstrdup(jbv->val.string.val,
jbv->val.string.len);
SV *result = cstr2sv(str);
+
pfree(str);
return result;
}
static PyObject *PLyObject_FromJsonbContainer(JsonbContainer *jsonb);
static JsonbValue *PLyObject_ToJsonbValue(PyObject *obj,
- JsonbParseState **jsonb_state, bool is_elem);
+ JsonbParseState **jsonb_state, bool is_elem);
#if PY_MAJOR_VERSION >= 3
typedef PyObject *(*PLyUnicode_FromStringAndSize_t)
out->type = jbvNull;
else if (PyString_Check(obj) || PyUnicode_Check(obj))
PLyString_ToJsonbValue(obj, out);
- /*
- * PyNumber_Check() returns true for booleans, so boolean check should come
- * first.
- */
+
+ /*
+ * PyNumber_Check() returns true for booleans, so boolean check should
+ * come first.
+ */
else if (PyBool_Check(obj))
{
out = palloc(sizeof(JsonbValue));
case SimilarityStrategyNumber:
case WordSimilarityStrategyNumber:
case StrictWordSimilarityStrategyNumber:
- /* Similarity search is exact. (Strict) word similarity search is inexact */
+
+ /*
+ * Similarity search is exact. (Strict) word similarity search is
+ * inexact
+ */
*recheck = (strategy != SimilarityStrategyNumber);
nlimit = index_strategy_get_limit(strategy);
/* Trigram bound type */
typedef uint8 TrgmBound;
-#define TRGM_BOUND_LEFT 0x01 /* trigram is left bound of word */
-#define TRGM_BOUND_RIGHT 0x02 /* trigram is right bound of word */
+#define TRGM_BOUND_LEFT 0x01 /* trigram is left bound of word */
+#define TRGM_BOUND_RIGHT 0x02 /* trigram is right bound of word */
/* Word similarity flags */
-#define WORD_SIMILARITY_CHECK_ONLY 0x01 /* only check existence of similar
- * search pattern in text */
-#define WORD_SIMILARITY_STRICT 0x02 /* force bounds of extent to match
- * word bounds */
+#define WORD_SIMILARITY_CHECK_ONLY 0x01 /* only check existence of similar
+ * search pattern in text */
+#define WORD_SIMILARITY_STRICT 0x02 /* force bounds of extent to match
+ * word bounds */
/*
* Module load callback
break;
}
- return 0.0; /* keep compiler quiet */
+ return 0.0; /* keep compiler quiet */
}
/*
/* Select appropriate threshold */
threshold = (flags & WORD_SIMILARITY_STRICT) ?
- strict_word_similarity_threshold :
- word_similarity_threshold;
+ strict_word_similarity_threshold :
+ word_similarity_threshold;
/*
- * Consider first trigram as initial lower bount for strict word similarity,
- * or initialize it later with first trigram present for plain word
- * similarity.
+ * Consider first trigram as initial lower bound for strict word
+ * similarity, or initialize it later with first trigram present for plain
+ * word similarity.
*/
lower = (flags & WORD_SIMILARITY_STRICT) ? 0 : -1;
* plain word similarity
*/
if ((flags & WORD_SIMILARITY_STRICT) ? (bounds[i] & TRGM_BOUND_RIGHT)
- : found[trgindex])
+ : found[trgindex])
{
int prev_lower,
tmp_ulen2,
smlr_max = Max(smlr_max, smlr_cur);
/*
- * if we only check that word similarity is greater than
- * threshold we do not need to calculate a maximum similarity.
+ * if we only check that word similarity is greater than threshold
+ * we do not need to calculate a maximum similarity.
*/
if ((flags & WORD_SIMILARITY_CHECK_ONLY) && smlr_max >= threshold)
break;
ulen1;
int *trg2indexes;
float4 result;
- TrgmBound *bounds;
+ TrgmBound *bounds;
protect_out_of_mem(slen1 + slen2);
&rows, &width, &startup_cost, &total_cost);
/*
- * The EPQ path must be at least as well sorted as the path itself,
- * in case it gets used as input to a mergejoin.
+ * The EPQ path must be at least as well sorted as the path itself, in
+ * case it gets used as input to a mergejoin.
*/
sorted_epq_path = epq_path;
if (sorted_epq_path != NULL &&
/* we're only interested if it is the primary key and valid */
if (index->indisprimary && IndexIsValid(index))
{
- int indnkeyatts = index->indnkeyatts;
+ int indnkeyatts = index->indnkeyatts;
if (indnkeyatts > 0)
{
ReorderBufferTXN *txn, Relation rel,
ReorderBufferChange *change);
static void pg_decode_truncate(LogicalDecodingContext *ctx,
- ReorderBufferTXN *txn,
- int nrelations, Relation relations[],
- ReorderBufferChange *change);
+ ReorderBufferTXN *txn,
+ int nrelations, Relation relations[],
+ ReorderBufferChange *change);
static bool pg_decode_filter(LogicalDecodingContext *ctx,
RepOriginId origin_id);
static void pg_decode_message(LogicalDecodingContext *ctx,
NULL, BUFFER_LOCK_SHARE, NULL);
if (!lastPageTuple)
{
- bool recorded;
+ bool recorded;
recorded = AutoVacuumRequestWork(AVW_BRINSummarizeRange,
RelationGetRelid(idxRel),
attno = slot->tts_nvalid;
/*
- * If tuple doesn't have all the atts indicated by attnum, read the
- * rest as NULLs or missing values
+ * If tuple doesn't have all the atts indicated by attnum, read the rest
+ * as NULLs or missing values
*/
if (attno < attnum)
slot_getmissingattrs(slot, attno, attnum);
Assert(IndexTupleSize(truncated) <= IndexTupleSize(source));
/*
- * Cannot leak memory here, TupleDescCopy() doesn't allocate any
- * inner structure, so, plain pfree() should clean all allocated memory
+ * Cannot leak memory here, TupleDescCopy() doesn't allocate any inner
+ * structure, so, plain pfree() should clean all allocated memory
*/
pfree(truncdesc);
bytea *
index_generic_reloptions(Datum reloptions, bool validate)
{
- int numoptions;
+ int numoptions;
GenericIndexOpts *idxopts;
- relopt_value *options;
+ relopt_value *options;
static const relopt_parse_elt tab[] = {
{"recheck_on_update", RELOPT_TYPE_BOOL, offsetof(GenericIndexOpts, recheck_on_update)}
};
idxopts = allocateReloptStruct(sizeof(GenericIndexOpts), options, numoptions);
- fillRelOptions((void *)idxopts, sizeof(GenericIndexOpts), options, numoptions,
+ fillRelOptions((void *) idxopts, sizeof(GenericIndexOpts), options, numoptions,
validate, tab, lengthof(tab));
pfree(options);
- return (bytea*) idxopts;
+ return (bytea *) idxopts;
}
/*
{
PredicateLockPageSplit(btree->index,
- BufferGetBlockNumber(stack->buffer),
- BufferGetBlockNumber(lbuffer));
+ BufferGetBlockNumber(stack->buffer),
+ BufferGetBlockNumber(lbuffer));
PredicateLockPageSplit(btree->index,
- BufferGetBlockNumber(stack->buffer),
- BufferGetBlockNumber(rbuffer));
+ BufferGetBlockNumber(stack->buffer),
+ BufferGetBlockNumber(rbuffer));
}
}
{
PredicateLockPageSplit(btree->index,
- BufferGetBlockNumber(stack->buffer),
- BufferGetBlockNumber(rbuffer));
+ BufferGetBlockNumber(stack->buffer),
+ BufferGetBlockNumber(rbuffer));
}
}
blkno = BufferGetBlockNumber(buffer);
/*
- * Copy a predicate lock from entry tree leaf (containing posting list)
- * to posting tree.
+ * Copy a predicate lock from entry tree leaf (containing posting list) to
+ * posting tree.
*/
PredicateLockPageSplit(index, BufferGetBlockNumber(entrybuffer), blkno);
GinPredicateLockPage(Relation index, BlockNumber blkno, Snapshot snapshot)
{
/*
- * When fast update is on then no need in locking pages, because we
- * anyway need to lock the whole index.
+ * When fast update is on then no need in locking pages, because we anyway
+ * need to lock the whole index.
*/
if (!GinGetUseFastUpdate(index))
- PredicateLockPage(index, blkno, snapshot);
+ PredicateLockPage(index, blkno, snapshot);
}
/*
entry->buffer = stack->buffer;
/*
- * Predicate lock visited posting tree page, following pages
- * will be locked by moveRightIfItNeeded or entryLoadMoreItems
+ * Predicate lock visited posting tree page, following pages will
+ * be locked by moveRightIfItNeeded or entryLoadMoreItems
*/
GinPredicateLockPage(ginstate->index, BufferGetBlockNumber(entry->buffer), snapshot);
UnlockReleaseBuffer(metabuffer);
/*
- * If fast update is enabled, we acquire a predicate lock on the entire
- * relation as fast update postpones the insertion of tuples into index
- * structure due to which we can't detect rw conflicts.
+ * If fast update is enabled, we acquire a predicate lock on the
+ * entire relation as fast update postpones the insertion of tuples
+ * into index structure due to which we can't detect rw conflicts.
*/
if (GinGetUseFastUpdate(scan->indexRelation))
PredicateLockRelation(scan->indexRelation, scan->xs_snapshot);
/*
* With fastupdate on each scan and each insert begin with access to
- * pending list, so it effectively lock entire index. In this case
- * we aquire predicate lock and check for conflicts over index relation,
+ * pending list, so it effectively locks the entire index. In this case we
+ * acquire a predicate lock and check for conflicts over the index relation,
* and hope that it will reduce locking overhead.
*
- * Do not use GinCheckForSerializableConflictIn() here, because
- * it will do nothing (it does actual work only with fastupdate off).
+ * Do not use GinCheckForSerializableConflictIn() here, because it
+ * will do nothing (it does actual work only with fastupdate off).
* Check for conflicts for entire index.
*/
CheckForSerializableConflictIn(index, NULL, InvalidBuffer);
}
else
{
- GinStatsData stats;
+ GinStatsData stats;
/*
* Fastupdate is off but if pending list isn't empty then we need to
ptr->page = BufferGetPage(ptr->buffer);
ptr->block.blkno = BufferGetBlockNumber(ptr->buffer);
PredicateLockPageSplit(rel,
- BufferGetBlockNumber(buffer),
- BufferGetBlockNumber(ptr->buffer));
+ BufferGetBlockNumber(buffer),
+ BufferGetBlockNumber(ptr->buffer));
}
/*
bool is_split;
/*
- * Check for any rw conflicts (in serialisation isolation level)
- * just before we intend to modify the page
+ * Check for any rw conflicts (at the serializable isolation level) just
+ * before we intend to modify the page
*/
CheckForSerializableConflictIn(state->r, NULL, stack->buffer);
result = heap_delete(relation, tid,
GetCurrentCommandId(true), InvalidSnapshot,
true /* wait for commit */ ,
- &hufd, false /* changingPart */);
+ &hufd, false /* changingPart */ );
switch (result)
{
case HeapTupleSelfUpdated:
* functional index. Compare the new and old values of the indexed
* expression to see if we are able to use a HOT update or not.
*/
-static bool ProjIndexIsUnchanged(Relation relation, HeapTuple oldtup, HeapTuple newtup)
+static bool
+ProjIndexIsUnchanged(Relation relation, HeapTuple oldtup, HeapTuple newtup)
{
- ListCell *l;
- List *indexoidlist = RelationGetIndexList(relation);
- EState *estate = CreateExecutorState();
- ExprContext *econtext = GetPerTupleExprContext(estate);
+ ListCell *l;
+ List *indexoidlist = RelationGetIndexList(relation);
+ EState *estate = CreateExecutorState();
+ ExprContext *econtext = GetPerTupleExprContext(estate);
TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(relation));
- bool equals = true;
- Datum old_values[INDEX_MAX_KEYS];
- bool old_isnull[INDEX_MAX_KEYS];
- Datum new_values[INDEX_MAX_KEYS];
- bool new_isnull[INDEX_MAX_KEYS];
- int indexno = 0;
+ bool equals = true;
+ Datum old_values[INDEX_MAX_KEYS];
+ bool old_isnull[INDEX_MAX_KEYS];
+ Datum new_values[INDEX_MAX_KEYS];
+ bool new_isnull[INDEX_MAX_KEYS];
+ int indexno = 0;
+
econtext->ecxt_scantuple = slot;
foreach(l, indexoidlist)
{
if (bms_is_member(indexno, relation->rd_projidx))
{
- Oid indexOid = lfirst_oid(l);
- Relation indexDesc = index_open(indexOid, AccessShareLock);
+ Oid indexOid = lfirst_oid(l);
+ Relation indexDesc = index_open(indexOid, AccessShareLock);
IndexInfo *indexInfo = BuildIndexInfo(indexDesc);
- int i;
+ int i;
ResetExprContext(econtext);
ExecStoreTuple(oldtup, slot, InvalidBuffer, false);
else if (!old_isnull[i])
{
Form_pg_attribute att = TupleDescAttr(RelationGetDescr(indexDesc), i);
+
if (!datumIsEqual(old_values[i], new_values[i], att->attbyval, att->attlen))
{
equals = false;
/*
* This old multi cannot possibly have members still running, but
* verify just in case. If it was a locker only, it can be removed
- * without any further consideration; but if it contained an update, we
- * might need to preserve it.
+ * without any further consideration; but if it contained an update,
+ * we might need to preserve it.
*/
if (MultiXactIdIsRunning(multi,
HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)))
else
{
/*
- * Not in progress, not committed -- must be aborted or crashed;
- * we can ignore it.
+ * Not in progress, not committed -- must be aborted or
+ * crashed; we can ignore it.
*/
}
heap_xlog_update(record, false);
break;
case XLOG_HEAP_TRUNCATE:
+
/*
* TRUNCATE is a no-op because the actions are already logged as
* SMGR WAL records. TRUNCATE WAL record only exists for logical
* rightmost leaf, has enough free space to accommodate a new entry and
* the insertion key is strictly greater than the first key in this page,
* then we can safely conclude that the new key will be inserted in the
- * cached block. So we simply search within the cached block and insert the
- * key at the appropriate location. We call it a fastpath.
+ * cached block. So we simply search within the cached block and insert
+ * the key at the appropriate location. We call it a fastpath.
*
* Testing has revealed, though, that the fastpath can result in increased
* contention on the exclusive-lock on the rightmost leaf page. So we
- * conditionally check if the lock is available. If it's not available then
- * we simply abandon the fastpath and take the regular path. This makes
- * sense because unavailability of the lock also signals that some other
- * backend might be concurrently inserting into the page, thus reducing our
- * chances to finding an insertion place in this page.
+ * conditionally check if the lock is available. If it's not available
+ * then we simply abandon the fastpath and take the regular path. This
+ * makes sense because unavailability of the lock also signals that some
+ * other backend might be concurrently inserting into the page, thus
+ * reducing our chances of finding an insertion place in this page.
*/
top:
fastpath = false;
offset = InvalidOffsetNumber;
if (RelationGetTargetBlock(rel) != InvalidBlockNumber)
{
- Size itemsz;
- Page page;
- BTPageOpaque lpageop;
+ Size itemsz;
+ Page page;
+ BTPageOpaque lpageop;
/*
* Conditionally acquire exclusive lock on the buffer before doing any
* checks. If we don't get the lock, we simply follow slowpath. If we
- * do get the lock, this ensures that the index state cannot change, as
- * far as the rightmost part of the index is concerned.
+ * do get the lock, this ensures that the index state cannot change,
+ * as far as the rightmost part of the index is concerned.
*/
buf = ReadBuffer(rel, RelationGetTargetBlock(rel));
/*
* Check if the page is still the rightmost leaf page, has enough
- * free space to accommodate the new tuple, and the insertion
- * scan key is strictly greater than the first key on the page.
+ * free space to accommodate the new tuple, and the insertion scan
+ * key is strictly greater than the first key on the page.
*/
if (P_ISLEAF(lpageop) && P_RIGHTMOST(lpageop) &&
!P_IGNORE(lpageop) &&
ReleaseBuffer(buf);
/*
- * If someone's holding a lock, it's likely to change anyway,
- * so don't try again until we get an updated rightmost leaf.
+ * If someone's holding a lock, it's likely to change anyway, so
+ * don't try again until we get an updated rightmost leaf.
*/
RelationSetTargetBlock(rel, InvalidBlockNumber);
}
Buffer rbuf;
/*
- * If we're here then a pagesplit is needed. We should never reach here
- * if we're using the fastpath since we should have checked for all the
- * required conditions, including the fact that this page has enough
- * freespace. Note that this routine can in theory deal with the
- * situation where a NULL stack pointer is passed (that's what would
- * happen if the fastpath is taken), like it does during crash
+ * If we're here then a pagesplit is needed. We should never reach
+ * here if we're using the fastpath since we should have checked for
+ * all the required conditions, including the fact that this page has
+ * enough freespace. Note that this routine can in theory deal with
+ * the situation where a NULL stack pointer is passed (that's what
+ * would happen if the fastpath is taken), like it does during crash
* recovery. But that path is much slower, defeating the very purpose
- * of the optimization. The following assertion should protect us from
- * any future code changes that invalidate those assumptions.
+ * of the optimization. The following assertion should protect us
+ * from any future code changes that invalidate those assumptions.
*
* Note that whenever we fail to take the fastpath, we clear the
* cached block. Checking for a valid cached block at this point is
* enough to decide whether we're in a fastpath or not.
*/
Assert(!(P_ISLEAF(lpageop) &&
- BlockNumberIsValid(RelationGetTargetBlock(rel))));
+ BlockNumberIsValid(RelationGetTargetBlock(rel))));
/* Choose the split point */
firstright = _bt_findsplitloc(rel, page,
BTMetaPageData *metad = NULL;
OffsetNumber itup_off;
BlockNumber itup_blkno;
- BlockNumber cachedBlock = InvalidBlockNumber;
+ BlockNumber cachedBlock = InvalidBlockNumber;
itup_off = newitemoff;
itup_blkno = BufferGetBlockNumber(buf);
* We do this after dropping locks on all buffers. So the information
* about whether the insertion block is still the rightmost block or
* not may have changed in between. But we will deal with that during
- * next insert operation. No special care is required while setting it.
+ * next insert operation. No special care is required while setting
+ * it.
*/
if (BlockNumberIsValid(cachedBlock) &&
_bt_getrootheight(rel) >= BTREE_FASTPATH_MIN_LEVEL)
_bt_update_meta_cleanup_info(Relation rel, TransactionId oldestBtpoXact,
float8 numHeapTuples)
{
- Buffer metabuf;
- Page metapg;
+ Buffer metabuf;
+ Page metapg;
BTMetaPageData *metad;
- bool needsRewrite = false;
- XLogRecPtr recptr;
+ bool needsRewrite = false;
+ XLogRecPtr recptr;
/* read the metapage and check if it needs rewrite */
metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
static bool
_bt_vacuum_needs_cleanup(IndexVacuumInfo *info)
{
- Buffer metabuf;
- Page metapg;
+ Buffer metabuf;
+ Page metapg;
BTMetaPageData *metad;
- bool result = false;
+ bool result = false;
metabuf = _bt_getbuf(info->index, BTREE_METAPAGE, BT_READ);
metapg = BufferGetPage(metabuf);
}
else
{
- StdRdOptions *relopts;
- float8 cleanup_scale_factor;
+ StdRdOptions *relopts;
+ float8 cleanup_scale_factor;
/*
* If table receives large enough amount of insertions and no cleanup
*/
relopts = (StdRdOptions *) info->index->rd_options;
cleanup_scale_factor = (relopts &&
- relopts->vacuum_cleanup_index_scale_factor >= 0)
- ? relopts->vacuum_cleanup_index_scale_factor
- : vacuum_cleanup_index_scale_factor;
+ relopts->vacuum_cleanup_index_scale_factor >= 0)
+ ? relopts->vacuum_cleanup_index_scale_factor
+ : vacuum_cleanup_index_scale_factor;
if (cleanup_scale_factor < 0 ||
metad->btm_last_cleanup_num_heap_tuples < 0 ||
info->num_heap_tuples > (1.0 + cleanup_scale_factor) *
- metad->btm_last_cleanup_num_heap_tuples)
+ metad->btm_last_cleanup_num_heap_tuples)
result = true;
}
/* The ENSURE stuff ensures we clean up shared memory on failure */
PG_ENSURE_ERROR_CLEANUP(_bt_end_vacuum_callback, PointerGetDatum(rel));
{
- TransactionId oldestBtpoXact;
+ TransactionId oldestBtpoXact;
cycleid = _bt_start_vacuum(rel);
*/
if (stats == NULL)
{
- TransactionId oldestBtpoXact;
+ TransactionId oldestBtpoXact;
/* Check if we need a cleanup */
if (!_bt_vacuum_needs_cleanup(info))
/*
* Truncate any non-key attributes from high key on leaf level
* (i.e. truncate on leaf level if we're building an INCLUDE
- * index). This is only done at the leaf level because
- * downlinks in internal pages are either negative infinity
- * items, or get their contents from copying from one level
- * down. See also: _bt_split().
+ * index). This is only done at the leaf level because downlinks
+ * in internal pages are either negative infinity items, or get
+ * their contents from copying from one level down. See also:
+ * _bt_split().
*
* Since the truncated tuple is probably smaller than the
* original, it cannot just be copied in place (besides, we want
* original high key, and add our own truncated high key at the
* same offset.
*
- * Note that the page layout won't be changed very much. oitup
- * is already located at the physical beginning of tuple space,
- * so we only shift the line pointer array back and forth, and
- * overwrite the latter portion of the space occupied by the
- * original tuple. This is fairly cheap.
+ * Note that the page layout won't be changed very much. oitup is
+ * already located at the physical beginning of tuple space, so we
+ * only shift the line pointer array back and forth, and overwrite
+ * the latter portion of the space occupied by the original tuple.
+ * This is fairly cheap.
*/
truncated = _bt_nonkey_truncate(wstate->index, oitup);
truncsz = IndexTupleSize(truncated);
*/
if (last_off == P_HIKEY)
{
- BTPageOpaque npageop;
+ BTPageOpaque npageop;
Assert(state->btps_minkey == NULL);
IndexTuple
_bt_nonkey_truncate(Relation rel, IndexTuple itup)
{
- int nkeyattrs = IndexRelationGetNumberOfKeyAttributes(rel);
- IndexTuple truncated;
+ int nkeyattrs = IndexRelationGetNumberOfKeyAttributes(rel);
+ IndexTuple truncated;
/*
- * We should only ever truncate leaf index tuples, which must have both key
- * and non-key attributes. It's never okay to truncate a second time.
+ * We should only ever truncate leaf index tuples, which must have both
+ * key and non-key attributes. It's never okay to truncate a second time.
*/
Assert(BTreeTupleGetNAtts(itup, rel) ==
IndexRelationGetNumberOfAttributes(rel));
bool
_bt_check_natts(Relation rel, Page page, OffsetNumber offnum)
{
- int16 natts = IndexRelationGetNumberOfAttributes(rel);
- int16 nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
- BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
- IndexTuple itup;
+ int16 natts = IndexRelationGetNumberOfAttributes(rel);
+ int16 nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
+ BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
+ IndexTuple itup;
/*
* We cannot reliably test a deleted or half-deleted page, since they have
Assert(offnum >= FirstOffsetNumber &&
offnum <= PageGetMaxOffsetNumber(page));
+
/*
* Mask allocated for number of keys in index tuple must be able to fit
* maximum possible number of index attributes
return BTreeTupleGetNAtts(itup, rel) == nkeyatts;
}
}
- else /* !P_ISLEAF(opaque) */
+ else /* !P_ISLEAF(opaque) */
{
if (offnum == P_FIRSTDATAKEY(opaque))
{
/*
* The first tuple on any internal page (possibly the first after
- * its high key) is its negative infinity tuple. Negative infinity
- * tuples are always truncated to zero attributes. They are a
- * particular kind of pivot tuple.
+ * its high key) is its negative infinity tuple. Negative
+ * infinity tuples are always truncated to zero attributes. They
+ * are a particular kind of pivot tuple.
*
* The number of attributes won't be explicitly represented if the
* negative infinity tuple was generated during a page split that
- * occurred with a version of Postgres before v11. There must be a
- * problem when there is an explicit representation that is
+ * occurred with a version of Postgres before v11. There must be
+ * a problem when there is an explicit representation that is
* non-zero, or when there is no explicit representation and the
* tuple is evidently not a pre-pg_upgrade tuple.
*
- * Prior to v11, downlinks always had P_HIKEY as their offset. Use
- * that to decide if the tuple is a pre-v11 tuple.
+ * Prior to v11, downlinks always had P_HIKEY as their offset.
+ * Use that to decide if the tuple is a pre-v11 tuple.
*/
return BTreeTupleGetNAtts(itup, rel) == 0 ||
- ((itup->t_info & INDEX_ALT_TID_MASK) == 0 &&
- ItemPointerGetOffsetNumber(&(itup->t_tid)) == P_HIKEY);
+ ((itup->t_info & INDEX_ALT_TID_MASK) == 0 &&
+ ItemPointerGetOffsetNumber(&(itup->t_tid)) == P_HIKEY);
}
else
{
/*
* Prepare the leaf datum to insert.
*
- * If an optional "compress" method is provided, then call it to form
- * the leaf datum from the input datum. Otherwise store the input datum as
- * is. Since we don't use index_form_tuple in this AM, we have to make sure
- * value to be inserted is not toasted; FormIndexDatum doesn't guarantee
- * that. But we assume the "compress" method to return an untoasted value.
+ * If an optional "compress" method is provided, then call it to form the
+ * leaf datum from the input datum. Otherwise store the input datum as
+ * is. Since we don't use index_form_tuple in this AM, we have to make
+ * sure value to be inserted is not toasted; FormIndexDatum doesn't
+ * guarantee that. But we assume the "compress" method to return an
+ * untoasted value.
*/
if (!isnull)
{
OpFamilyOpFuncGroup *opclassgroup;
int i;
ListCell *lc;
- spgConfigIn configIn;
+ spgConfigIn configIn;
spgConfigOut configOut;
Oid configOutLefttype = InvalidOid;
Oid configOutRighttype = InvalidOid;
configOutRighttype = procform->amprocrighttype;
/*
- * When leaf and attribute types are the same, compress function
- * is not required and we set corresponding bit in functionset
- * for later group consistency check.
+ * When leaf and attribute types are the same, compress
+ * function is not required and we set corresponding bit in
+ * functionset for later group consistency check.
*/
if (!OidIsValid(configOut.leafType) ||
configOut.leafType == configIn.attType)
bool initfileinval; /* does relcache init file need invalidation? */
uint16 gidlen; /* length of the GID - GID follows the header */
XLogRecPtr origin_lsn; /* lsn of this record at origin node */
- TimestampTz origin_timestamp; /* time of prepare at origin node */
+ TimestampTz origin_timestamp; /* time of prepare at origin node */
} TwoPhaseFileHeader;
/*
{
TwoPhaseFileHeader *hdr;
StateFileChunk *record;
- bool replorigin;
+ bool replorigin;
/* Add the end sentinel to the list of 2PC records */
RegisterTwoPhaseRecord(TWOPHASE_RM_END_ID, 0,
ParsePrepareRecord(uint8 info, char *xlrec, xl_xact_parsed_prepare *parsed)
{
TwoPhaseFileHeader *hdr;
- char *bufptr;
+ char *bufptr;
hdr = (TwoPhaseFileHeader *) xlrec;
bufptr = xlrec + MAXALIGN(sizeof(TwoPhaseFileHeader));
IsInTransactionBlock(bool isTopLevel)
{
/*
- * Return true on same conditions that would make PreventInTransactionBlock
- * error out
+ * Return true on same conditions that would make
+ * PreventInTransactionBlock error out
*/
if (IsTransactionBlock())
return true;
}
/* dump transaction origin information only for abort prepared */
- if ( (replorigin_session_origin != InvalidRepOriginId) &&
- TransactionIdIsValid(twophase_xid) &&
- XLogLogicalInfoActive())
+ if ((replorigin_session_origin != InvalidRepOriginId) &&
+ TransactionIdIsValid(twophase_xid) &&
+ XLogLogicalInfoActive())
{
xl_xinfo.xinfo |= XACT_XINFO_HAS_ORIGIN;
* Mark that start phase has correctly finished for an exclusive backup.
* Session-level locks are updated as well to reflect that state.
*
- * Note that CHECK_FOR_INTERRUPTS() must not occur while updating
- * backup counters and session-level lock. Otherwise they can be
- * updated inconsistently, and which might cause do_pg_abort_backup()
- * to fail.
+ * Note that CHECK_FOR_INTERRUPTS() must not occur while updating backup
+ * counters and session-level lock. Otherwise they can be updated
+ * inconsistently, and which might cause do_pg_abort_backup() to fail.
*/
if (exclusive)
{
/*
* Clean up session-level lock.
*
- * You might think that WALInsertLockRelease() can be called
- * before cleaning up session-level lock because session-level
- * lock doesn't need to be protected with WAL insertion lock.
- * But since CHECK_FOR_INTERRUPTS() can occur in it,
- * session-level lock must be cleaned up before it.
+ * You might think that WALInsertLockRelease() can be called before
+ * cleaning up session-level lock because session-level lock doesn't need
+ * to be protected with WAL insertion lock. But since
+ * CHECK_FOR_INTERRUPTS() can occur in it, session-level lock must be
+ * cleaned up before it.
*/
sessionBackupState = SESSION_BACKUP_NONE;
(uint32) (startpoint >> 32), (uint32) startpoint, startxlogfilename);
fprintf(fp, "STOP WAL LOCATION: %X/%X (file %s)\n",
(uint32) (stoppoint >> 32), (uint32) stoppoint, stopxlogfilename);
+
/*
* Transfer remaining lines including label and start timeline to
* history file.
bool *backupFromStandby)
{
char startxlogfilename[MAXFNAMELEN];
- TimeLineID tli_from_walseg, tli_from_file;
+ TimeLineID tli_from_walseg,
+ tli_from_file;
FILE *lfp;
char ch;
char backuptype[20];
}
/*
- * Parse START TIME and LABEL. Those are not mandatory fields for
- * recovery but checking for their presence is useful for debugging
- * and the next sanity checks. Cope also with the fact that the
- * result buffers have a pre-allocated size, hence if the backup_label
- * file has been generated with strings longer than the maximum assumed
- * here an incorrect parsing happens. That's fine as only minor
- * consistency checks are done afterwards.
+ * Parse START TIME and LABEL. Those are not mandatory fields for recovery
+ * but checking for their presence is useful for debugging and the next
+ * sanity checks. Cope also with the fact that the result buffers have a
+ * pre-allocated size, hence if the backup_label file has been generated
+ * with strings longer than the maximum assumed here an incorrect parsing
+ * happens. That's fine as only minor consistency checks are done
+ * afterwards.
*/
if (fscanf(lfp, "START TIME: %127[^\n]\n", backuptime) == 1)
ereport(DEBUG1,
backuplabel, BACKUP_LABEL_FILE)));
/*
- * START TIMELINE is new as of 11. Its parsing is not mandatory, still
- * use it as a sanity check if present.
+ * START TIMELINE is new as of 11. Its parsing is not mandatory, still use
+ * it as a sanity check if present.
*/
if (fscanf(lfp, "START TIMELINE: %u\n", &tli_from_file) == 1)
{
switch (stmt->objtype)
{
case OBJECT_TABLE:
+
/*
* Because this might be a sequence, we test both relation and
* sequence bits, and later do a more limited test when we know
case OBJECT_VIEW:
msg = gettext_noop("permission denied for view %s");
break;
- /* these currently aren't used */
+ /* these currently aren't used */
case OBJECT_ACCESS_METHOD:
case OBJECT_AMOP:
case OBJECT_AMPROC:
case OBJECT_TSDICTIONARY:
msg = gettext_noop("must be owner of text search dictionary %s");
break;
- /*
- * Special cases: For these, the error message talks about
- * "relation", because that's where the ownership is
- * attached. See also check_object_ownership().
- */
+
+ /*
+ * Special cases: For these, the error message talks
+ * about "relation", because that's where the
+ * ownership is attached. See also
+ * check_object_ownership().
+ */
case OBJECT_COLUMN:
case OBJECT_POLICY:
case OBJECT_RULE:
case OBJECT_TRIGGER:
msg = gettext_noop("must be owner of relation %s");
break;
- /* these currently aren't used */
+ /* these currently aren't used */
case OBJECT_ACCESS_METHOD:
case OBJECT_AMOP:
case OBJECT_AMPROC:
* transform this deletion request into a delete of this
* owning object.
*
- * For INTERNAL_AUTO dependencies, we don't enforce this;
- * in other words, we don't follow the links back to the
- * owning object.
+ * For INTERNAL_AUTO dependencies, we don't enforce this; in
+ * other words, we don't follow the links back to the owning
+ * object.
*/
if (foundDep->deptype == DEPENDENCY_INTERNAL_AUTO)
break;
to->attislocal = true;
to->attinhcount = 0;
to->attcollation = (i < numkeyatts) ?
- collationObjectId[i] : InvalidOid;
+ collationObjectId[i] : InvalidOid;
}
else
{
to->atttypmod = exprTypmod(indexkey);
to->attislocal = true;
to->attcollation = (i < numkeyatts) ?
- collationObjectId[i] : InvalidOid;
+ collationObjectId[i] : InvalidOid;
ReleaseSysCache(tuple);
}
localaddr = index_constraint_create(heapRelation,
- indexRelationId,
- parentConstraintId,
- indexInfo,
- indexRelationName,
- constraintType,
- constr_flags,
- allow_system_table_mods,
- is_internal);
+ indexRelationId,
+ parentConstraintId,
+ indexInfo,
+ indexRelationName,
+ constraintType,
+ constr_flags,
+ allow_system_table_mods,
+ is_internal);
if (constraintId)
*constraintId = localaddr.objectId;
}
else
{
bool have_simple_col = false;
- DependencyType deptype;
+ DependencyType deptype;
deptype = OidIsValid(parentIndexRelid) ? DEPENDENCY_INTERNAL_AUTO : DEPENDENCY_AUTO;
recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL);
/*
- * Also, if this is a constraint on a partition, mark it as depending
- * on the constraint in the parent.
+ * Also, if this is a constraint on a partition, mark it as depending on
+ * the constraint in the parent.
*/
if (OidIsValid(parentConstraintId))
{
- ObjectAddress parentConstr;
+ ObjectAddress parentConstr;
ObjectAddressSet(parentConstr, ConstraintRelationId, parentConstraintId);
recordDependencyOn(&referenced, &parentConstr, DEPENDENCY_INTERNAL_AUTO);
Oid *opfamilies1, Oid *opfamilies2,
AttrNumber *attmap, int maplen)
{
- int i;
+ int i;
if (info1->ii_Unique != info2->ii_Unique)
return false;
/* ignore expressions at this stage */
if ((info1->ii_IndexAttrNumbers[i] != InvalidAttrNumber) &&
(attmap[info2->ii_IndexAttrNumbers[i] - 1] !=
- info1->ii_IndexAttrNumbers[i]))
+ info1->ii_IndexAttrNumbers[i]))
return false;
/* collation and opfamily is not valid for including columns */
return false;
if (info1->ii_Expressions != NIL)
{
- bool found_whole_row;
- Node *mapped;
+ bool found_whole_row;
+ Node *mapped;
mapped = map_variable_attnos((Node *) info2->ii_Expressions,
1, 0, attmap, maplen,
return false;
if (info1->ii_Predicate != NULL)
{
- bool found_whole_row;
- Node *mapped;
+ bool found_whole_row;
+ Node *mapped;
mapped = map_variable_attnos((Node *) info2->ii_Predicate,
1, 0, attmap, maplen,
* It is safe to use a non-transactional update even though our
* transaction could still fail before committing. Setting relhasindex
* true is safe even if there are no indexes (VACUUM will eventually fix
- * it). And of course the new relpages and
- * reltuples counts are correct regardless. However, we don't want to
- * change relpages (or relallvisible) if the caller isn't providing an
- * updated reltuples count, because that would bollix the
- * reltuples/relpages ratio which is what's really important.
+ * it). And of course the new relpages and reltuples counts are correct
+ * regardless. However, we don't want to change relpages (or
+ * relallvisible) if the caller isn't providing an updated reltuples
+ * count, because that would bollix the reltuples/relpages ratio which is
+ * what's really important.
*/
pg_class = heap_open(RelationRelationId, RowExclusiveLock);
{
SerializedReindexState *sistate = (SerializedReindexState *) reindexstate;
int c = 0;
- MemoryContext oldcontext;
+ MemoryContext oldcontext;
currentlyReindexedHeap = sistate->currentlyReindexedHeap;
currentlyReindexedIndex = sistate->currentlyReindexedIndex;
}
/*
- * get_object_address is pretty sensitive to the length of its input lists;
- * check that they're what it wants.
+ * get_object_address is pretty sensitive to the length of its input
+ * lists; check that they're what it wants.
*/
switch (type)
{
return OBJECT_MATVIEW;
case RELKIND_FOREIGN_TABLE:
return OBJECT_FOREIGN_TABLE;
- /* other relkinds are not supported here because they don't map to OBJECT_* values */
+
+ /*
+ * other relkinds are not supported here because they don't map to
+ * OBJECT_* values
+ */
default:
elog(ERROR, "unexpected relkind: %d", relkind);
return 0;
bool
has_partition_attrs(Relation rel, Bitmapset *attnums, bool *used_in_expr)
{
- PartitionKey key;
+ PartitionKey key;
int partnatts;
List *partexprs;
ListCell *partexprs_item;
Relation pg_constraint;
Relation parentRel;
Relation rel;
- ScanKeyData key;
- SysScanDesc scan;
+ ScanKeyData key;
+ SysScanDesc scan;
TupleDesc tupdesc;
HeapTuple tuple;
AttrNumber *attmap;
while ((tuple = systable_getnext(scan)) != NULL)
{
- Form_pg_constraint constrForm = (Form_pg_constraint) GETSTRUCT(tuple);
+ Form_pg_constraint constrForm = (Form_pg_constraint) GETSTRUCT(tuple);
AttrNumber conkey[INDEX_MAX_KEYS];
AttrNumber mapped_conkey[INDEX_MAX_KEYS];
AttrNumber confkey[INDEX_MAX_KEYS];
nelem,
nelem,
InvalidOid, /* not a domain constraint */
- constrForm->conindid, /* same index */
- constrForm->confrelid, /* same foreign rel */
+ constrForm->conindid, /* same index */
+ constrForm->confrelid, /* same foreign rel */
confkey,
conpfeqop,
conppeqop,
if (cloned)
{
/*
- * Feed back caller about the constraints we created, so that they can
- * set up constraint verification.
+ * Feed back caller about the constraints we created, so that they
+ * can set up constraint verification.
*/
newc = palloc(sizeof(ClonedConstraint));
newc->relid = relationId;
if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
{
- PartitionDesc partdesc = RelationGetPartitionDesc(rel);
+ PartitionDesc partdesc = RelationGetPartitionDesc(rel);
int i;
for (i = 0; i < partdesc->nparts; i++)
cloned);
}
- heap_close(rel, NoLock); /* keep lock till commit */
+ heap_close(rel, NoLock); /* keep lock till commit */
heap_close(parentRel, NoLock);
heap_close(pg_constraint, RowShareLock);
}
void
ConstraintSetParentConstraint(Oid childConstrId, Oid parentConstrId)
{
- Relation constrRel;
+ Relation constrRel;
Form_pg_constraint constrForm;
- HeapTuple tuple,
- newtup;
- ObjectAddress depender;
- ObjectAddress referenced;
+ HeapTuple tuple,
+ newtup;
+ ObjectAddress depender;
+ ObjectAddress referenced;
constrRel = heap_open(ConstraintRelationId, RowExclusiveLock);
tuple = SearchSysCache1(CONSTROID, ObjectIdGetDatum(childConstrId));
get_relation_idx_constraint_oid(Oid relationId, Oid indexId)
{
Relation pg_constraint;
- SysScanDesc scan;
- ScanKeyData key;
+ SysScanDesc scan;
+ ScanKeyData key;
HeapTuple tuple;
Oid constraintId = InvalidOid;
true, NULL, 1, &key);
while ((tuple = systable_getnext(scan)) != NULL)
{
- Form_pg_constraint constrForm;
+ Form_pg_constraint constrForm;
constrForm = (Form_pg_constraint) GETSTRUCT(tuple);
if (constrForm->conindid == indexId)
bool
DeleteInheritsTuple(Oid inhrelid, Oid inhparent)
{
- bool found = false;
+ bool found = false;
Relation catalogRelation;
ScanKeyData key;
SysScanDesc scan;
/* Superusers can bypass permission checks */
if (!superuser())
{
- ObjectType objtype = get_object_type(classId, objectId);
+ ObjectType objtype = get_object_type(classId, objectId);
/* must be owner */
if (!has_privs_of_role(GetUserId(), old_ownerId))
frozenXid, cutoffMulti, mapped_tables);
/*
- * If it's a system catalog, queue a sinval message to flush all
- * catcaches on the catalog when we reach CommandCounterIncrement.
+ * If it's a system catalog, queue a sinval message to flush all catcaches
+ * on the catalog when we reach CommandCounterIncrement.
*/
if (is_system_catalog)
CacheInvalidateCatalog(OIDOldHeap);
slot,
NULL);
- if (slot == NULL) /* "do nothing" */
+ if (slot == NULL) /* "do nothing" */
goto next_tuple;
/* FDW might have changed tuple */
"GRANT" : "REVOKE");
/* object_type */
values[i++] = CStringGetTextDatum(stringify_grant_objtype(
- cmd->d.grant.istmt->objtype));
+ cmd->d.grant.istmt->objtype));
/* schema */
nulls[i++] = true;
/* identity */
return "TABLESPACE";
case OBJECT_TYPE:
return "TYPE";
- /* these currently aren't used */
+ /* these currently aren't used */
case OBJECT_ACCESS_METHOD:
case OBJECT_AGGREGATE:
case OBJECT_AMOP:
return "TABLESPACES";
case OBJECT_TYPE:
return "TYPES";
- /* these currently aren't used */
+ /* these currently aren't used */
case OBJECT_ACCESS_METHOD:
case OBJECT_AGGREGATE:
case OBJECT_AMOP:
{
if (objtype == OBJECT_PROCEDURE)
*requiredResultType = RECORDOID;
- else if (outCount == 0) /* save first output param's type */
+ else if (outCount == 0) /* save first output param's type */
*requiredResultType = toid;
outCount++;
}
IndexStmt *stmt,
Oid indexRelationId,
Oid parentIndexId,
- Oid parentConstraintId,
+ Oid parentConstraintId,
bool is_alter_table,
bool check_rights,
bool check_not_in_use,
/*
* Calculate the new list of index columns including both key columns and
- * INCLUDE columns. Later we can determine which of these are key columns,
- * and which are just part of the INCLUDE list by checking the list
- * position. A list item in a position less than ii_NumIndexKeyAttrs is
- * part of the key columns, and anything equal to and over is part of the
- * INCLUDE columns.
+ * INCLUDE columns. Later we can determine which of these are key
+ * columns, and which are just part of the INCLUDE list by checking the
+ * list position. A list item in a position less than ii_NumIndexKeyAttrs
+ * is part of the key columns, and anything equal to and over is part of
+ * the INCLUDE columns.
*/
allIndexParams = list_concat(list_copy(stmt->indexParams),
list_copy(stmt->indexIncludingParams));
/* OK */
break;
case RELKIND_FOREIGN_TABLE:
+
/*
* Custom error message for FOREIGN TABLE since the term is close
* to a regular table and can confuse the user.
* partition-local index can enforce global uniqueness iff the PK
* value completely determines the partition that a row is in.
*
- * Thus, verify that all the columns in the partition key appear
- * in the unique key definition.
+ * Thus, verify that all the columns in the partition key appear in
+ * the unique key definition.
*/
for (i = 0; i < key->partnatts; i++)
{
- bool found = false;
- int j;
+ bool found = false;
+ int j;
const char *constraint_type;
if (stmt->primary)
errmsg("unsupported %s constraint with partition key definition",
constraint_type),
errdetail("%s constraints cannot be used when partition keys include expressions.",
- constraint_type)));
+ constraint_type)));
for (j = 0; j < indexInfo->ii_NumIndexAttrs; j++)
{
/*
* Make the catalog entries for the index, including constraints. This
* step also actually builds the index, except if caller requested not to
- * or in concurrent mode, in which case it'll be done later, or
- * doing a partitioned index (because those don't have storage).
+ * or in concurrent mode, in which case it'll be done later, or doing a
+ * partitioned index (because those don't have storage).
*/
flags = constr_flags = 0;
if (stmt->isconstraint)
if (partitioned)
{
/*
- * Unless caller specified to skip this step (via ONLY), process
- * each partition to make sure they all contain a corresponding index.
+ * Unless caller specified to skip this step (via ONLY), process each
+ * partition to make sure they all contain a corresponding index.
*
* If we're called internally (no stmt->relation), recurse always.
*/
*/
for (i = 0; i < nparts; i++)
{
- Oid childRelid = part_oids[i];
- Relation childrel;
- List *childidxs;
- ListCell *cell;
+ Oid childRelid = part_oids[i];
+ Relation childrel;
+ List *childidxs;
+ ListCell *cell;
AttrNumber *attmap;
- bool found = false;
- int maplen;
+ bool found = false;
+ int maplen;
childrel = heap_open(childRelid, lockmode);
childidxs = RelationGetIndexList(childrel);
opfamOids,
attmap, maplen))
{
- Oid cldConstrOid = InvalidOid;
+ Oid cldConstrOid = InvalidOid;
/*
* Found a match.
childStmt->idxname = NULL;
childStmt->relationId = childRelid;
DefineIndex(childRelid, childStmt,
- InvalidOid, /* no predefined OID */
+ InvalidOid, /* no predefined OID */
indexRelationId, /* this is our child */
createdConstraintId,
is_alter_table, check_rights, check_not_in_use,
/*
* The pg_index row we inserted for this index was marked
- * indisvalid=true. But if we attached an existing index that
- * is invalid, this is incorrect, so update our row to
- * invalid too.
+ * indisvalid=true. But if we attached an existing index that is
+ * invalid, this is incorrect, so update our row to invalid too.
*/
if (invalidate_parent)
{
}
else
{
- indexInfo->ii_IndexAttrNumbers[attn] = 0; /* marks expression */
+ indexInfo->ii_IndexAttrNumbers[attn] = 0; /* marks expression */
indexInfo->ii_Expressions = lappend(indexInfo->ii_Expressions,
expr);
typeOidP[attn] = atttype;
/*
- * Included columns have no collation, no opclass and no ordering options.
+ * Included columns have no collation, no opclass and no ordering
+ * options.
*/
if (attn >= nkeycols)
{
IndexSetParentIndex(Relation partitionIdx, Oid parentOid)
{
Relation pg_inherits;
- ScanKeyData key[2];
- SysScanDesc scan;
+ ScanKeyData key[2];
+ SysScanDesc scan;
Oid partRelid = RelationGetRelid(partitionIdx);
HeapTuple tuple;
bool fix_dependencies;
if (parentOid == InvalidOid)
{
/*
- * No pg_inherits row, and no parent wanted: nothing to do in
- * this case.
+ * No pg_inherits row, and no parent wanted: nothing to do in this
+ * case.
*/
fix_dependencies = false;
}
else
{
- Datum values[Natts_pg_inherits];
- bool isnull[Natts_pg_inherits];
+ Datum values[Natts_pg_inherits];
+ bool isnull[Natts_pg_inherits];
/*
* No pg_inherits row exists, and we want a parent for this index,
}
else
{
- Form_pg_inherits inhForm = (Form_pg_inherits) GETSTRUCT(tuple);
+ Form_pg_inherits inhForm = (Form_pg_inherits) GETSTRUCT(tuple);
if (parentOid == InvalidOid)
{
if (OidIsValid(parentOid))
{
- ObjectAddress parentIdx;
+ ObjectAddress parentIdx;
ObjectAddressSet(parentIdx, RelationRelationId, parentOid);
recordDependencyOn(&partIdx, &parentIdx, DEPENDENCY_INTERNAL_AUTO);
}
else
{
- ObjectAddress partitionTbl;
+ ObjectAddress partitionTbl;
ObjectAddressSet(partitionTbl, RelationRelationId,
partitionIdx->rd_index->indrelid);
bool nowait; /* no wait mode */
Oid viewowner; /* view owner for checking the privilege */
Oid viewoid; /* OID of the view to be locked */
- List *ancestor_views; /* OIDs of ancestor views */
+ List *ancestor_views; /* OIDs of ancestor views */
} LockViewRecurse_context;
static bool
HeapTuple tuple;
MemoryContextCopyAndSetIdentifier(rscxt,
- RelationGetRelationName(relation));
+ RelationGetRelationName(relation));
rsdesc = MemoryContextAllocZero(rscxt, sizeof(RowSecurityDesc));
rsdesc->rscxt = rscxt;
PopActiveSnapshot();
/*
- * We can now release any subsidiary memory of the portal's context;
- * we'll never use it again. The executor already dropped its context,
- * but this will clean up anything that glommed onto the portal's context via
+ * We can now release any subsidiary memory of the portal's context; we'll
+ * never use it again. The executor already dropped its context, but this
+ * will clean up anything that glommed onto the portal's context via
* PortalContext.
*/
MemoryContextDeleteChildren(portal->portalContext);
* If the node has a name, split it up and determine creation namespace.
* If not (a possibility not considered by the grammar, but one which can
* occur via the "CREATE TABLE ... (LIKE)" command), then we put the
- * object in the same namespace as the relation, and cons up a name for it.
+ * object in the same namespace as the relation, and cons up a name for
+ * it.
*/
if (stmt->defnames)
namespaceId = QualifiedNameGetCreationNamespace(stmt->defnames,
for (;;)
{
- Oid existingstats;
+ Oid existingstats;
stxname = makeObjectName(name1, name2, modlabel);
buf[0] = '\0';
foreach(lc, exprs)
{
- ColumnRef *cref = (ColumnRef *) lfirst(lc);
+ ColumnRef *cref = (ColumnRef *) lfirst(lc);
const char *name;
/* It should be one of these, but just skip if it happens not to be */
}
/*
- * Write a WAL record to allow this set of actions to be logically decoded.
+ * Write a WAL record to allow this set of actions to be logically
+ * decoded.
*
* Assemble an array of relids so we can write a single WAL record for the
* whole action.
Assert(XLogLogicalInfoActive());
logrelids = palloc(list_length(relids_logged) * sizeof(Oid));
- foreach (cell, relids_logged)
+ foreach(cell, relids_logged)
logrelids[i++] = lfirst_oid(cell);
xlrec.dbId = MyDatabaseId;
CommandCounterIncrement();
/*
- * Did the request for a missing value work? If not we'll have to do
- * a rewrite
+ * Did the request for a missing value work? If not we'll have to do a
+ * rewrite
*/
if (!rawEnt->missingMode)
tab->rewrite |= AT_REWRITE_DEFAULT_VAL;
ObjectAddressSet(address, ConstraintRelationId, constrOid);
/*
- * Create the triggers that will enforce the constraint. We only want
- * the action triggers to appear for the parent partitioned relation,
- * even though the constraints also exist below.
+ * Create the triggers that will enforce the constraint. We only want the
+ * action triggers to appear for the parent partitioned relation, even
+ * though the constraints also exist below.
*/
createForeignKeyTriggers(rel, RelationGetRelid(pkrel), fkconstraint,
constrOid, indexOid, !recursing);
indexOid);
/*
- * For the referencing side, create the check triggers. We only need these
- * on the partitions.
+ * For the referencing side, create the check triggers. We only need
+ * these on the partitions.
*/
if (rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
createForeignKeyCheckTriggers(RelationGetRelid(rel), refRelOid,
}
/*
- * Constraints proved insufficient. For plain relations, queue a validation
- * item now; for partitioned tables, recurse to process each partition.
+ * Constraints proved insufficient. For plain relations, queue a
+ * validation item now; for partitioned tables, recurse to process each
+ * partition.
*/
if (scanrel->rd_rel->relkind == RELKIND_RELATION)
{
/*
* If we're attaching a partition other than the default partition and a
* default one exists, then that partition's partition constraint changes,
- * so add an entry to the work queue to validate it, too. (We must not
- * do this when the partition being attached is the default one; we
- * already did it above!)
+ * so add an entry to the work queue to validate it, too. (We must not do
+ * this when the partition being attached is the default one; we already
+ * did it above!)
*/
if (OidIsValid(defaultPartOid))
{
*/
for (i = 0; i < list_length(attachRelIdxs); i++)
{
- Oid cldIdxId = RelationGetRelid(attachrelIdxRels[i]);
- Oid cldConstrOid = InvalidOid;
+ Oid cldIdxId = RelationGetRelid(attachrelIdxRels[i]);
+ Oid cldConstrOid = InvalidOid;
/* does this index have a parent? if so, can't use it */
if (attachrelIdxRels[i]->rd_rel->relispartition)
continue;
Assert((IndexGetRelation(get_partition_parent(idxid), false) ==
- RelationGetRelid(rel)));
+ RelationGetRelid(rel)));
idx = index_open(idxid, AccessExclusiveLock);
IndexSetParentIndex(idx, InvalidOid);
*/
struct AttachIndexCallbackState
{
- Oid partitionOid;
- Oid parentTblOid;
- bool lockedParentTbl;
+ Oid partitionOid;
+ Oid parentTblOid;
+ bool lockedParentTbl;
};
static void
cldConstrId = InvalidOid;
/*
- * If this partition already has an index attached, refuse the operation.
+ * If this partition already has an index attached, refuse the
+ * operation.
*/
refuseDupeIndexAttach(parentIdx, partIdx, partTbl);
errdetail("The index definitions do not match.")));
/*
- * If there is a constraint in the parent, make sure there is one
- * in the child too.
+ * If there is a constraint in the parent, make sure there is one in
+ * the child too.
*/
constraintOid = get_relation_idx_constraint_oid(RelationGetRelid(parentTbl),
RelationGetRelid(parentIdx));
RelationGetRelationName(partIdx),
RelationGetRelationName(parentIdx)),
errdetail("The index \"%s\" belongs to a constraint in table \"%s\" but no constraint exists for index \"%s\".",
- RelationGetRelationName(parentIdx),
- RelationGetRelationName(parentTbl),
- RelationGetRelationName(partIdx))));
+ RelationGetRelationName(parentIdx),
+ RelationGetRelationName(parentTbl),
+ RelationGetRelationName(partIdx))));
}
/* All good -- do it */
static void
refuseDupeIndexAttach(Relation parentIdx, Relation partIdx, Relation partitionTbl)
{
- Relation pg_inherits;
- ScanKeyData key;
- HeapTuple tuple;
- SysScanDesc scan;
+ Relation pg_inherits;
+ ScanKeyData key;
+ HeapTuple tuple;
+ SysScanDesc scan;
pg_inherits = heap_open(InheritsRelationId, AccessShareLock);
ScanKeyInit(&key, Anum_pg_inherits_inhparent,
NULL, 1, &key);
while (HeapTupleIsValid(tuple = systable_getnext(scan)))
{
- Form_pg_inherits inhForm;
+ Form_pg_inherits inhForm;
Oid tab;
inhForm = (Form_pg_inherits) GETSTRUCT(tuple);
static void
validatePartitionedIndex(Relation partedIdx, Relation partedTbl)
{
- Relation inheritsRel;
- SysScanDesc scan;
- ScanKeyData key;
- int tuples = 0;
- HeapTuple inhTup;
- bool updated = false;
+ Relation inheritsRel;
+ SysScanDesc scan;
+ ScanKeyData key;
+ int tuples = 0;
+ HeapTuple inhTup;
+ bool updated = false;
Assert(partedIdx->rd_rel->relkind == RELKIND_PARTITIONED_INDEX);
while ((inhTup = systable_getnext(scan)) != NULL)
{
Form_pg_inherits inhForm = (Form_pg_inherits) GETSTRUCT(inhTup);
- HeapTuple indTup;
- Form_pg_index indexForm;
+ HeapTuple indTup;
+ Form_pg_index indexForm;
indTup = SearchSysCache1(INDEXRELID,
- ObjectIdGetDatum(inhForm->inhrelid));
+ ObjectIdGetDatum(inhForm->inhrelid));
if (!indTup)
elog(ERROR, "cache lookup failed for index %u",
inhForm->inhrelid);
* oldtup should be non-NULL, whereas for UPDATE events normally both
* oldtup and newtup are non-NULL. But for UPDATE events fired for
* capturing transition tuples during UPDATE partition-key row
- * movement, oldtup is NULL when the event is for a row being inserted,
- * whereas newtup is NULL when the event is for a row being deleted.
+ * movement, oldtup is NULL when the event is for a row being
+ * inserted, whereas newtup is NULL when the event is for a row being
+ * deleted.
*/
Assert(!(event == TRIGGER_EVENT_DELETE && delete_old_table &&
oldtup == NULL));
}
if (newtup != NULL &&
((event == TRIGGER_EVENT_INSERT && insert_new_table) ||
- (event == TRIGGER_EVENT_UPDATE && update_new_table)))
+ (event == TRIGGER_EVENT_UPDATE && update_new_table)))
{
Tuplestorestate *new_tuplestore;
/*
* If transition tables are the only reason we're here, return. As
* mentioned above, we can also be here during update tuple routing in
- * presence of transition tables, in which case this function is called
- * separately for oldtup and newtup, so we expect exactly one of them
- * to be NULL.
+ * presence of transition tables, in which case this function is
+ * called separately for oldtup and newtup, so we expect exactly one
+ * of them to be NULL.
*/
if (trigdesc == NULL ||
(event == TRIGGER_EVENT_DELETE && !trigdesc->trig_delete_after_row) ||
*/
void
ExecEvalFuncExprStrictFusage(ExprState *state, ExprEvalStep *op,
- ExprContext *econtext)
+ ExprContext *econtext)
{
FunctionCallInfo fcinfo = op->d.func.fcinfo_data;
rInfo++;
nr--;
}
+
/*
* Third, search through the result relations that were created during
* tuple routing, if any.
{
/*
* Add a wrapper around the ExecProcNode callback that checks stack depth
- * during the first execution and maybe adds an instrumentation
- * wrapper. When the callback is changed after execution has already begun
- * that means we'll superfluously execute ExecProcNodeFirst, but that seems
- * ok.
+ * during the first execution and maybe adds an instrumentation wrapper.
+ * When the callback is changed after execution has already begun that
+ * means we'll superfluously execute ExecProcNodeFirst, but that seems ok.
*/
node->ExecProcNodeReal = function;
node->ExecProcNode = ExecProcNodeFirst;
if (HeapTupleHeaderGetNatts(slot->tts_tuple->t_data) <
slot->tts_tupleDescriptor->natts)
{
- HeapTuple tuple;
+ HeapTuple tuple;
MemoryContext oldContext = MemoryContextSwitchTo(slot->tts_mcxt);
tuple = heap_expand_tuple(slot->tts_tuple,
/* for each grouping set */
for (i = 0; i < phasedata->numsets; i++)
{
- int length = phasedata->gset_lengths[i];
+ int length = phasedata->gset_lengths[i];
if (phasedata->eqfunctions[length - 1] != NULL)
continue;
if (gatherstate->need_to_scan_locally)
{
- EState *estate = gatherstate->ps.state;
+ EState *estate = gatherstate->ps.state;
/* Install our DSA area while executing the plan. */
estate->es_query_dsa =
{
PlanState *outerPlan = outerPlanState(gm_state);
TupleTableSlot *outerTupleSlot;
- EState *estate = gm_state->ps.state;
+ EState *estate = gm_state->ps.state;
/* Install our DSA area while executing the plan. */
estate->es_query_dsa = gm_state->pei ? gm_state->pei->area : NULL;
List *lclauses;
List *rclauses;
List *hoperators;
- TupleDesc outerDesc, innerDesc;
+ TupleDesc outerDesc,
+ innerDesc;
ListCell *l;
/* check for unsupported flags */
ExecInitMergeJoin(MergeJoin *node, EState *estate, int eflags)
{
MergeJoinState *mergestate;
- TupleDesc outerDesc, innerDesc;
+ TupleDesc outerDesc,
+ innerDesc;
/* check for unsupported flags */
Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
*/
ExecDelete(mtstate, tupleid, oldtuple, planSlot, epqstate,
estate, &tuple_deleted, false,
- false /* canSetTag */, true /* changingPart */);
+ false /* canSetTag */ , true /* changingPart */ );
/*
* For some reason if DELETE didn't happen (e.g. trigger prevented
HeapTuple tuple;
/*
- * Determine the target partition. If ExecFindPartition does not find
- * a partition after all, it doesn't return here; otherwise, the returned
+ * Determine the target partition. If ExecFindPartition does not find a
+ * partition after all, it doesn't return here; otherwise, the returned
* value is to be used as an index into the arrays for the ResultRelInfo
* and TupleConversionMap for the partition.
*/
slot = ExecDelete(node, tupleid, oldtuple, planSlot,
&node->mt_epqstate, estate,
NULL, true, node->canSetTag,
- false /* changingPart */);
+ false /* changingPart */ );
break;
default:
elog(ERROR, "unknown operation");
if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
(operation == CMD_INSERT || update_tuple_routing_needed))
mtstate->mt_partition_tuple_routing =
- ExecSetupPartitionTupleRouting(mtstate, rel);
+ ExecSetupPartitionTupleRouting(mtstate, rel);
/*
* Build state for collecting transition tuples. This requires having a
RelationGetDescr(scanstate->ss.ss_currentRelation));
/*
- * Initialize result slot, type and projection.
- * tuple table and result tuple initialization
+ * Initialize result slot, type and projection. tuple table and result
+ * tuple initialization
*/
ExecInitResultTupleSlotTL(estate, &scanstate->ss.ps);
ExecAssignScanProjectionInfo(&scanstate->ss);
ExecCreateScanSlotFromOuterPlan(estate, &sortstate->ss);
/*
- * Initialize return slot and type. No need to initialize projection info because
- * this node doesn't do projections.
+ * Initialize return slot and type. No need to initialize projection info
+ * because this node doesn't do projections.
*/
ExecInitResultTupleSlotTL(estate, &sortstate->ss.ps);
sortstate->ss.ps.ps_ProjInfo = NULL;
/*
* Create comparator for lookups of rows in the table (potentially
- * across-type comparison).
+ * across-type comparison).
*/
sstate->cur_eq_comp = ExecBuildGroupingEqual(tupDescLeft, tupDescRight,
ncols,
node->ss.ps.subPlan = NIL;
/*
- * As the expressions are only ever used once, disable JIT for
- * them. This is worthwhile because it's common to insert significant
+ * As the expressions are only ever used once, disable JIT for them.
+ * This is worthwhile because it's common to insert significant
* amounts of data via VALUES().
*/
saved_jit_flags = econtext->ecxt_estate->es_jit_flags;
isnull;
/*
- * At this point aggref->wfuncno is not yet set (it's
- * set up in ExecInitWindowAgg() after initializing the
+ * At this point aggref->wfuncno is not yet set (it's set
+ * up in ExecInitWindowAgg() after initializing the
* expression). So load it from memory each time round.
*/
v_wfuncnop = l_ptr_const(&wfunc->wfuncno,
k_hashes(bloom_filter *filter, uint32 *hashes, unsigned char *elem, size_t len)
{
uint64 hash;
- uint32 x, y;
+ uint32 x,
+ y;
uint64 m;
int i;
check_ssl_key_file_permissions(const char *ssl_key_file, bool isServerStart)
{
int loglevel = isServerStart ? FATAL : LOG;
- struct stat buf;
+ struct stat buf;
if (stat(ssl_key_file, &buf) != 0)
{
if (ssl_passphrase_command[0] && ssl_passphrase_command_supports_reload)
SSL_CTX_set_default_passwd_cb(context, ssl_external_passwd_cb);
else
+
/*
* If reloading and no external command is configured, override
* OpenSSL's default handling of passphrase-protected files,
return NULL;
/*
- * Get the signature algorithm of the certificate to determine the
- * hash algorithm to use for the result.
+ * Get the signature algorithm of the certificate to determine the hash
+ * algorithm to use for the result.
*/
if (!OBJ_find_sigid_algs(X509_get_signature_nid(server_cert),
&algo_nid, NULL))
{
int result;
int shift = BITS_PER_BITMAPWORD - 8;
+
result = wordnum * BITS_PER_BITMAPWORD;
while ((w >> shift) == 0)
{
/*
* Yes. Figure out whether it is integral or float; this requires
- * both a syntax check and a range check. strtoint() can do both for us.
- * We know the token will end at a character that strtoint will stop at,
- * so we do not need to modify the string.
+ * both a syntax check and a range check. strtoint() can do both for
+ * us. We know the token will end at a character that strtoint will
+ * stop at, so we do not need to modify the string.
*/
char *endptr;
/*
* We need attr_needed data for building targetlist of a join
* relation representing join between matching partitions for
- * partitionwise join. A given attribute of a child will be
- * needed in the same highest joinrel where the corresponding
- * attribute of parent is needed. Hence it suffices to use the
- * same Relids set for parent and child.
+ * partitionwise join. A given attribute of a child will be needed
+ * in the same highest joinrel where the corresponding attribute
+ * of parent is needed. Hence it suffices to use the same Relids
+ * set for parent and child.
*/
for (attno = rel->min_attr; attno <= rel->max_attr; attno++)
{
join_search_one_level(root, lev);
/*
- * Run generate_partitionwise_join_paths() and
- * generate_gather_paths() for each just-processed joinrel. We could
- * not do this earlier because both regular and partial paths can get
- * added to a particular joinrel at multiple times within
- * join_search_one_level.
+ * Run generate_partitionwise_join_paths() and generate_gather_paths()
+ * for each just-processed joinrel. We could not do this earlier
+ * because both regular and partial paths can get added to a
+ * particular joinrel at multiple times within join_search_one_level.
*
* After that, we're done creating paths for the joinrel, so run
* set_cheapest().
opfamily = index->opfamily[indexcol];
idxcollation = index->indexcollations[indexcol];
+
/*
* Clause must be a binary opclause.
*/
IndexCollMatchesExprColl(index->indexcollations[i],
lfirst_oid(collids_cell)))
- break;
+ break;
}
if (i >= index->ncolumns)
break; /* no match found */
RelOptInfo *rel2, RelOptInfo *joinrel,
SpecialJoinInfo *sjinfo, List *restrictlist);
static void try_partitionwise_join(PlannerInfo *root, RelOptInfo *rel1,
- RelOptInfo *rel2, RelOptInfo *joinrel,
- SpecialJoinInfo *parent_sjinfo,
- List *parent_restrictlist);
+ RelOptInfo *rel2, RelOptInfo *joinrel,
+ SpecialJoinInfo *parent_sjinfo,
+ List *parent_restrictlist);
static int match_expr_to_partition_keys(Expr *expr, RelOptInfo *rel,
bool strict_op);
*/
static void
try_partitionwise_join(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
- RelOptInfo *joinrel, SpecialJoinInfo *parent_sjinfo,
- List *parent_restrictlist)
+ RelOptInfo *joinrel, SpecialJoinInfo *parent_sjinfo,
+ List *parent_restrictlist)
{
int nparts;
int cnt_parts;
joinrel->part_scheme == rel2->part_scheme);
/*
- * Since we allow partitionwise join only when the partition bounds of
- * the joining relations exactly match, the partition bounds of the join
+ * Since we allow partitionwise join only when the partition bounds of the
+ * joining relations exactly match, the partition bounds of the join
* should match those of the joining relations.
*/
Assert(partition_bounds_equal(joinrel->part_scheme->partnatts,
{
/*
* Since we can't generate the final scan/join target, this is our
- * last opportunity to use any partial paths that exist. We don't
- * do this if the case where the target is parallel-safe, since we
- * will be able to generate superior paths by doing it after the
- * final scan/join target has been applied.
+ * last opportunity to use any partial paths that exist. We don't do
+ * this if the case where the target is parallel-safe, since we will
+ * be able to generate superior paths by doing it after the final
+ * scan/join target has been applied.
*
* Note that this may invalidate rel->cheapest_total_path, so we must
* not rely on it after this point without first calling set_cheapest.
/*
* Note down whether any partition key cols are being updated. Though it's
* the root partitioned table's updatedCols we are interested in, we
- * instead use parentrte to get the updatedCols. This is convenient because
- * parentrte already has the root partrel's updatedCols translated to match
- * the attribute ordering of parentrel.
+ * instead use parentrte to get the updatedCols. This is convenient
+ * because parentrte already has the root partrel's updatedCols translated
+ * to match the attribute ordering of parentrel.
*/
if (!root->partColsUpdated)
root->partColsUpdated =
switch (constraint_exclusion)
{
case CONSTRAINT_EXCLUSION_OFF:
+
/*
* Don't prune if feature turned off -- except if the relation is
* a partition. While partprune.c-style partition pruning is not
return false;
case CONSTRAINT_EXCLUSION_PARTITION:
+
/*
* When constraint_exclusion is set to 'partition' we only handle
* OTHER_MEMBER_RELs, or BASERELs in cases where the result target
!(rel->reloptkind == RELOPT_BASEREL &&
root->inhTargetKind != INHKIND_NONE &&
rel->relid == root->parse->resultRelation))
- return false;
+ return false;
break;
case CONSTRAINT_EXCLUSION_ON:
- break; /* always try to exclude */
+ break; /* always try to exclude */
}
/*
static Query *transformCreateTableAsStmt(ParseState *pstate,
CreateTableAsStmt *stmt);
static Query *transformCallStmt(ParseState *pstate,
- CallStmt *stmt);
+ CallStmt *stmt);
static void transformLockingClause(ParseState *pstate, Query *qry,
LockingClause *lc, bool pushedDown);
#ifdef RAW_EXPRESSION_COVERAGE_TEST
cxt->blist = lappend(cxt->blist, seqstmt);
/*
- * Store the identity sequence name that we decided on. ALTER TABLE
- * ... ADD COLUMN ... IDENTITY needs this so that it can fill the new
- * column with values from the sequence, while the association of the
- * sequence with the table is not set until after the ALTER TABLE.
+ * Store the identity sequence name that we decided on. ALTER TABLE ...
+ * ADD COLUMN ... IDENTITY needs this so that it can fill the new column
+ * with values from the sequence, while the association of the sequence
+ * with the table is not set until after the ALTER TABLE.
*/
column->identitySequence = seqstmt->sequence;
*/
if (table_like_clause->options & CREATE_TABLE_LIKE_STATISTICS)
{
- List *parent_extstats;
- ListCell *l;
+ List *parent_extstats;
+ ListCell *l;
parent_extstats = RelationGetStatExtList(relation);
foreach(l, parent_extstats)
{
- Oid parent_stat_oid = lfirst_oid(l);
+ Oid parent_stat_oid = lfirst_oid(l);
CreateStatsStmt *stats_stmt;
stats_stmt = generateClonedExtStatsStmt(cxt->relation,
generateClonedExtStatsStmt(RangeVar *heapRel, Oid heapRelid,
Oid source_statsid)
{
- HeapTuple ht_stats;
+ HeapTuple ht_stats;
Form_pg_statistic_ext statsrec;
CreateStatsStmt *stats;
- List *stat_types = NIL;
- List *def_names = NIL;
- bool isnull;
- Datum datum;
- ArrayType *arr;
- char *enabled;
- int i;
+ List *stat_types = NIL;
+ List *def_names = NIL;
+ bool isnull;
+ Datum datum;
+ ArrayType *arr;
+ char *enabled;
+ int i;
Assert(OidIsValid(heapRelid));
Assert(heapRel != NULL);
*/
if (op_in_opfamily(opclause->opno, partopfamily))
{
- Oid oper;
+ Oid oper;
oper = OidIsValid(commutator) ? commutator : opclause->opno;
get_op_opfamily_properties(oper, partopfamily, false,
{
switch (part_scheme->strategy)
{
- /*
- * For range and list partitioning, we need the ordering
- * procedure with lefttype being the partition key's type, and
- * righttype the clause's operator's right type.
- */
+ /*
+ * For range and list partitioning, we need the ordering
+ * procedure with lefttype being the partition key's type,
+ * and righttype the clause's operator's right type.
+ */
case PARTITION_STRATEGY_LIST:
case PARTITION_STRATEGY_RANGE:
cmpfn =
op_righttype, BTORDER_PROC);
break;
- /*
- * For hash partitioning, we need the hashing procedure for
- * the clause's type.
- */
+ /*
+ * For hash partitioning, we need the hashing procedure
+ * for the clause's type.
+ */
case PARTITION_STRATEGY_HASH:
cmpfn =
get_opfamily_proc(part_scheme->partopfamily[partkeyidx],
static bool
EnableLockPagesPrivilege(int elevel)
{
- HANDLE hToken;
+ HANDLE hToken;
TOKEN_PRIVILEGES tp;
- LUID luid;
+ LUID luid;
if (!OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &hToken))
{
size);
/*
- * Use the original size, not the rounded-up value, when falling back
- * to non-huge pages.
+ * Use the original size, not the rounded-up value, when
+ * falling back to non-huge pages.
*/
size = orig_size;
flProtect = PAGE_READWRITE;
/*
* Once do_pg_start_backup has been called, ensure that any failure causes
* us to abort the backup so we don't "leak" a backup counter. For this
- * reason, *all* functionality between do_pg_start_backup() and
- * the end of do_pg_stop_backup() should be inside the error cleanup block!
+ * reason, *all* functionality between do_pg_start_backup() and the end of
+ * do_pg_stop_backup() should be inside the error cleanup block!
*/
PG_ENSURE_ERROR_CLEANUP(base_backup_cleanup, (Datum) 0);
{
if (total_checksum_failures > 1)
{
- char buf[64];
+ char buf[64];
snprintf(buf, sizeof(buf), INT64_FORMAT, total_checksum_failures);
char pathbuf[MAXPGPATH * 2];
struct stat statbuf;
int64 size = 0;
- const char *lastDir; /* Split last dir from parent path. */
- bool isDbDir = false; /* Does this directory contain relations? */
+ const char *lastDir; /* Split last dir from parent path. */
+ bool isDbDir = false; /* Does this directory contain relations? */
/*
- * Determine if the current path is a database directory that can
- * contain relations.
+ * Determine if the current path is a database directory that can contain
+ * relations.
*
- * Start by finding the location of the delimiter between the parent
- * path and the current path.
+ * Start by finding the location of the delimiter between the parent path
+ * and the current path.
*/
lastDir = last_dir_separator(path);
strspn(lastDir + 1, "0123456789") == strlen(lastDir + 1))
{
/* Part of path that contains the parent directory. */
- int parentPathLen = lastDir - path;
+ int parentPathLen = lastDir - path;
/*
* Mark path as a database directory if the parent path is either
{
int excludeIdx;
bool excludeFound;
- ForkNumber relForkNum; /* Type of fork if file is a relation */
+ ForkNumber relForkNum; /* Type of fork if file is a relation */
int relOidChars; /* Chars in filename that are the rel oid */
/* Skip special stuff */
/* Never exclude init forks */
if (relForkNum != INIT_FORKNUM)
{
- char initForkFile[MAXPGPATH];
- char relOid[OIDCHARS + 1];
+ char initForkFile[MAXPGPATH];
+ char relOid[OIDCHARS + 1];
/*
* If any other type of fork, check if there is an init fork
while ((cnt = fread(buf, 1, Min(sizeof(buf), statbuf->st_size - len), fp)) > 0)
{
/*
- * The checksums are verified at block level, so we iterate over
- * the buffer in chunks of BLCKSZ, after making sure that
- * TAR_SEND_SIZE/buf is divisible by BLCKSZ and we read a multiple
- * of BLCKSZ bytes.
+ * The checksums are verified at block level, so we iterate over the
+ * buffer in chunks of BLCKSZ, after making sure that
+ * TAR_SEND_SIZE/buf is divisible by BLCKSZ and we read a multiple of
+ * BLCKSZ bytes.
*/
Assert(TAR_SEND_SIZE % BLCKSZ == 0);
* start of the base backup. Otherwise, they might have been
* written only halfway and the checksum would not be valid.
* However, replaying WAL would reinstate the correct page in
- * this case.
- * We also skip completely new pages, since they don't have
- * a checksum yet.
+ * this case. We also skip completely new pages, since they
+ * don't have a checksum yet.
*/
if (!PageIsNew(page) && PageGetLSN(page) < startptr)
{
static void libpqrcv_check_conninfo(const char *conninfo);
static char *libpqrcv_get_conninfo(WalReceiverConn *conn);
static void libpqrcv_get_senderinfo(WalReceiverConn *conn,
- char **sender_host, int *sender_port);
+ char **sender_host, int *sender_port);
static char *libpqrcv_identify_system(WalReceiverConn *conn,
TimeLineID *primary_tli,
int *server_version);
*/
static void
libpqrcv_get_senderinfo(WalReceiverConn *conn, char **sender_host,
- int *sender_port)
+ int *sender_port)
{
- char *ret = NULL;
+ char *ret = NULL;
*sender_host = NULL;
*sender_port = 0;
static void change_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn,
Relation relation, ReorderBufferChange *change);
static void truncate_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn,
- int nrelations, Relation relations[], ReorderBufferChange *change);
+ int nrelations, Relation relations[], ReorderBufferChange *change);
static void message_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn,
XLogRecPtr message_lsn, bool transactional,
const char *prefix, Size message_size, const char *message);
bool cascade, bool restart_seqs)
{
int i;
- uint8 flags = 0;
+ uint8 flags = 0;
pq_sendbyte(out, 'T'); /* action TRUNCATE */
int i;
int nrelids;
List *relids = NIL;
- uint8 flags;
+ uint8 flags;
nrelids = pq_getmsgint(in, 4);
break;
case REORDER_BUFFER_CHANGE_TRUNCATE:
- {
- int i;
- int nrelids = change->data.truncate.nrelids;
- int nrelations = 0;
- Relation *relations;
-
- relations = palloc0(nrelids * sizeof(Relation));
- for (i = 0; i < nrelids; i++)
{
- Oid relid = change->data.truncate.relids[i];
- Relation relation;
+ int i;
+ int nrelids = change->data.truncate.nrelids;
+ int nrelations = 0;
+ Relation *relations;
- relation = RelationIdGetRelation(relid);
+ relations = palloc0(nrelids * sizeof(Relation));
+ for (i = 0; i < nrelids; i++)
+ {
+ Oid relid = change->data.truncate.relids[i];
+ Relation relation;
- if (relation == NULL)
- elog(ERROR, "could not open relation with OID %u", relid);
+ relation = RelationIdGetRelation(relid);
- if (!RelationIsLogicallyLogged(relation))
- continue;
+ if (relation == NULL)
+ elog(ERROR, "could not open relation with OID %u", relid);
- relations[nrelations++] = relation;
- }
+ if (!RelationIsLogicallyLogged(relation))
+ continue;
- rb->apply_truncate(rb, txn, nrelations, relations, change);
+ relations[nrelations++] = relation;
+ }
- for (i = 0; i < nrelations; i++)
- RelationClose(relations[i]);
+ rb->apply_truncate(rb, txn, nrelations, relations, change);
- break;
- }
+ for (i = 0; i < nrelations; i++)
+ RelationClose(relations[i]);
+
+ break;
+ }
case REORDER_BUFFER_CHANGE_MESSAGE:
rb->message(rb, txn, change->lsn, true,
if (txn->serialized && txn->final_lsn == 0)
{
ReorderBufferChange *last =
- dlist_tail_element(ReorderBufferChange, node, &txn->changes);
+ dlist_tail_element(ReorderBufferChange, node, &txn->changes);
txn->final_lsn = last->lsn;
}
XLogSegNoOffsetToRecPtr(segno, 0, recptr, wal_segment_size);
snprintf(path, MAXPGPATH, "pg_replslot/%s/xid-%u-lsn-%X-%X.snap",
- NameStr(MyReplicationSlot->data.name),
- xid,
- (uint32) (recptr >> 32), (uint32) recptr);
+ NameStr(MyReplicationSlot->data.name),
+ xid,
+ (uint32) (recptr >> 32), (uint32) recptr);
}
/*
static void
apply_handle_truncate(StringInfo s)
{
- bool cascade = false;
- bool restart_seqs = false;
- List *remote_relids = NIL;
- List *remote_rels = NIL;
- List *rels = NIL;
- List *relids = NIL;
- List *relids_logged = NIL;
- ListCell *lc;
+ bool cascade = false;
+ bool restart_seqs = false;
+ List *remote_relids = NIL;
+ List *remote_rels = NIL;
+ List *rels = NIL;
+ List *relids = NIL;
+ List *relids_logged = NIL;
+ ListCell *lc;
ensure_transaction();
}
/*
- * Even if we used CASCADE on the upstream master we explicitly
- * default to replaying changes without further cascading.
- * This might be later changeable with a user specified option.
+ * Even if we used CASCADE on the upstream master we explicitly default to
+ * replaying changes without further cascading. This might be later
+ * changeable with a user specified option.
*/
ExecuteTruncateGuts(rels, relids, relids_logged, DROP_RESTRICT, restart_seqs);
ReorderBufferTXN *txn, Relation rel,
ReorderBufferChange *change);
static void pgoutput_truncate(LogicalDecodingContext *ctx,
- ReorderBufferTXN *txn, int nrelations, Relation relations[],
- ReorderBufferChange *change);
+ ReorderBufferTXN *txn, int nrelations, Relation relations[],
+ ReorderBufferChange *change);
static bool pgoutput_origin_filter(LogicalDecodingContext *ctx,
RepOriginId origin_id);
pg_logical_replication_slot_advance(XLogRecPtr startlsn, XLogRecPtr moveto)
{
LogicalDecodingContext *ctx;
- ResourceOwner old_resowner = CurrentResourceOwner;
- XLogRecPtr retlsn = InvalidXLogRecPtr;
+ ResourceOwner old_resowner = CurrentResourceOwner;
+ XLogRecPtr retlsn = InvalidXLogRecPtr;
PG_TRY();
{
{
/*
* Only superusers and members of pg_read_all_stats can see details.
- * Other users only get the pid value
- * to know whether it is a WAL receiver, but no details.
+ * Other users only get the pid value to know whether it is a WAL
+ * receiver, but no details.
*/
MemSet(&nulls[1], true, sizeof(bool) * (tupdesc->natts - 1));
}
WalSndWriteData(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid,
bool last_write)
{
- TimestampTz now;
+ TimestampTz now;
/* output previously gathered data in a CopyData packet */
pq_putmessage_noblock('d', ctx->out->data, ctx->out->len);
if (!is_member_of_role(GetUserId(), DEFAULT_ROLE_READ_ALL_STATS))
{
/*
- * Only superusers and members of pg_read_all_stats can see details.
- * Other users only get the pid value to know it's a walsender,
- * but no details.
+ * Only superusers and members of pg_read_all_stats can see
+ * details. Other users only get the pid value to know it's a
+ * walsender, but no details.
*/
MemSet(&nulls[1], true, PG_STAT_GET_WAL_SENDERS_COLS - 1);
}
/*
* It is possible that there are files left over from before a crash
- * restart with the same name. In order for BufFileOpenShared()
- * not to get confused about how many segments there are, we'll unlink
- * the next segment number if it already exists.
+ * restart with the same name. In order for BufFileOpenShared() not to
+ * get confused about how many segments there are, we'll unlink the next
+ * segment number if it already exists.
*/
SharedSegmentName(name, buffile->name, segment + 1);
SharedFileSetDelete(buffile->fileset, name, true);
/*
* Separate prior reads of mq_ring from the increment of mq_bytes_read
- * which follows. This pairs with the full barrier in shm_mq_send_bytes().
- * We only need a read barrier here because the increment of mq_bytes_read
- * is actually a read followed by a dependent write.
+ * which follows. This pairs with the full barrier in
+ * shm_mq_send_bytes(). We only need a read barrier here because the
+ * increment of mq_bytes_read is actually a read followed by a dependent
+ * write.
*/
pg_read_barrier();
* intended effect!
*/
PreventInTransactionBlock(isTopLevel,
- (stmt->kind == REINDEX_OBJECT_SCHEMA) ? "REINDEX SCHEMA" :
- (stmt->kind == REINDEX_OBJECT_SYSTEM) ? "REINDEX SYSTEM" :
- "REINDEX DATABASE");
+ (stmt->kind == REINDEX_OBJECT_SCHEMA) ? "REINDEX SCHEMA" :
+ (stmt->kind == REINDEX_OBJECT_SYSTEM) ? "REINDEX SYSTEM" :
+ "REINDEX DATABASE");
ReindexMultipleTables(stmt->name, stmt->kind, stmt->options);
break;
default:
if (stmt->concurrent)
PreventInTransactionBlock(isTopLevel,
- "CREATE INDEX CONCURRENTLY");
+ "CREATE INDEX CONCURRENTLY");
/*
* Look up the relation OID just once, right here at the
case OBJECT_INDEX:
if (stmt->concurrent)
PreventInTransactionBlock(isTopLevel,
- "DROP INDEX CONCURRENTLY");
+ "DROP INDEX CONCURRENTLY");
/* fall through */
case OBJECT_TABLE:
websearch_to_tsquery_byid(PG_FUNCTION_ARGS)
{
text *in = PG_GETARG_TEXT_PP(1);
- MorphOpaque data;
+ MorphOpaque data;
TSQuery query = NULL;
data.cfg_id = PG_GETARG_OID(0);
}
/*
- * At this point, either index_oid == InvalidOid or it's a valid index OID.
- * Also, after this test and the one below, either attno == 0 for
+ * At this point, either index_oid == InvalidOid or it's a valid index
+ * OID. Also, after this test and the one below, either attno == 0 for
* index-wide or AM-wide tests, or it's a valid column number in a valid
* index.
*/
break;
case AMPROP_ORDERABLE:
+
/*
* generic assumption is that nonkey columns are not orderable
*/
* getting there from just the index column type seems like a
* lot of work. So instead we expect the AM to handle this in
* its amproperty routine. The generic result is to return
- * false if the AM says it never supports this, or if this is a
- * nonkey column, and null otherwise (meaning we don't know).
+ * false if the AM says it never supports this, or if this is
+ * a nonkey column, and null otherwise (meaning we don't
+ * know).
*/
if (!iskey || !routine->amcanorderbyop)
{
{
/*
* If possible, the AM should handle this test in its
- * amproperty function without opening the rel. But this is the
- * generic fallback if it does not.
+ * amproperty function without opening the rel. But this
+ * is the generic fallback if it does not.
*/
Relation indexrel = index_open(index_oid, AccessShareLock);
DateTimeParseError(DTERR_TZDISP_OVERFLOW, date_str, "timestamp");
tz = psprintf("%c%02d:%02d",
- tmfc.tzsign > 0 ? '+' : '-', tmfc.tzh, tmfc.tzm);
+ tmfc.tzsign > 0 ? '+' : '-', tmfc.tzh, tmfc.tzm);
tm->tm_zone = tz;
}
/* Perform the required comparison(s) */
for (i = 0; i < in->nkeys; i++)
{
- StrategyNumber strategy = in->scankeys[i].sk_strategy;
- BOX *box = spg_box_quad_get_scankey_bbox(&in->scankeys[i],
- &out->recheck);
- Datum query = BoxPGetDatum(box);
+ StrategyNumber strategy = in->scankeys[i].sk_strategy;
+ BOX *box = spg_box_quad_get_scankey_bbox(&in->scankeys[i],
+ &out->recheck);
+ Datum query = BoxPGetDatum(box);
switch (strategy)
{
Datum
spg_poly_quad_compress(PG_FUNCTION_ARGS)
{
- POLYGON *polygon = PG_GETARG_POLYGON_P(0);
+ POLYGON *polygon = PG_GETARG_POLYGON_P(0);
BOX *box;
box = box_copy(&polygon->boundbox);
return NULL;
/*
- * A root scalar is stored as an array of one element, so we get the
- * array and then its first (and only) member.
+ * A root scalar is stored as an array of one element, so we get the array
+ * and then its first (and only) member.
*/
it = JsonbIteratorInit(jbc);
Assert(tmp.val.array.nElems == 1 && tmp.val.array.rawScalar);
tok = JsonbIteratorNext(&it, res, true);
- Assert (tok == WJB_ELEM);
+ Assert(tok == WJB_ELEM);
Assert(IsAJsonbScalar(res));
tok = JsonbIteratorNext(&it, &tmp, true);
- Assert (tok == WJB_END_ARRAY);
+ Assert(tok == WJB_END_ARRAY);
tok = JsonbIteratorNext(&it, &tmp, true);
Assert(tok == WJB_DONE);
errmsg("jsonb value must be numeric")));
/*
- * v.val.numeric points into jsonb body, so we need to make a copy to return
+ * v.val.numeric points into jsonb body, so we need to make a copy to
+ * return
*/
retValue = DatumGetNumericCopy(NumericGetDatum(v.val.numeric));
jsonb_int2(PG_FUNCTION_ARGS)
{
Jsonb *in = PG_GETARG_JSONB_P(0);
- JsonbValue v;
+ JsonbValue v;
Datum retValue;
if (!JsonbExtractScalar(&in->root, &v) || v.type != jbvNumeric)
jsonb_int4(PG_FUNCTION_ARGS)
{
Jsonb *in = PG_GETARG_JSONB_P(0);
- JsonbValue v;
+ JsonbValue v;
Datum retValue;
if (!JsonbExtractScalar(&in->root, &v) || v.type != jbvNumeric)
jsonb_int8(PG_FUNCTION_ARGS)
{
Jsonb *in = PG_GETARG_JSONB_P(0);
- JsonbValue v;
+ JsonbValue v;
Datum retValue;
if (!JsonbExtractScalar(&in->root, &v) || v.type != jbvNumeric)
JsonIterateStringValuesAction action; /* an action that will be applied
* to each json value */
void *action_state; /* any necessary context for iteration */
- uint32 flags; /* what kind of elements from a json we want to iterate */
+ uint32 flags; /* what kind of elements from a json we want
+ * to iterate */
} IterateJsonStringValuesState;
/* state for transform_json_string_values function */
uint32
parse_jsonb_index_flags(Jsonb *jb)
{
- JsonbIterator *it;
- JsonbValue v;
- JsonbIteratorToken type;
- uint32 flags = 0;
+ JsonbIterator *it;
+ JsonbValue v;
+ JsonbIteratorToken type;
+ uint32 flags = 0;
it = JsonbIteratorInit(&jb->root);
type = JsonbIteratorNext(&it, &v, false);
/*
- * We iterate over array (scalar internally is represented as array, so, we
- * will accept it too) to check all its elements. Flag names are chosen
- * the same as jsonb_typeof uses.
+ * We iterate over array (scalar internally is represented as array, so,
+ * we will accept it too) to check all its elements. Flag names are
+ * chosen the same as jsonb_typeof uses.
*/
if (type != WJB_BEGIN_ARRAY)
ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errhint("Possible values are: \"string\", \"numeric\", \"boolean\", \"key\" and \"all\"")));
if (v.val.string.len == 3 &&
- pg_strncasecmp(v.val.string.val, "all", 3) == 0)
+ pg_strncasecmp(v.val.string.val, "all", 3) == 0)
flags |= jtiAll;
else if (v.val.string.len == 3 &&
pg_strncasecmp(v.val.string.val, "key", 3) == 0)
}
/* JsonbValue is a value of object or element of array */
- switch(v.type)
+ switch (v.type)
{
case jbvString:
if (flags & jtiString)
case jbvNumeric:
if (flags & jtiNumeric)
{
- char *val;
+ char *val;
val = DatumGetCString(DirectFunctionCall1(numeric_out,
- NumericGetDatum(v.val.numeric)));
+ NumericGetDatum(v.val.numeric)));
action(state, val, strlen(val));
pfree(val);
{
IterateJsonStringValuesState *_state = (IterateJsonStringValuesState *) state;
- switch(tokentype)
+ switch (tokentype)
{
case JSON_TOKEN_STRING:
if (_state->flags & jtiString)
if (_state->flags & jtiKey)
{
- char *val = pstrdup(fname);
+ char *val = pstrdup(fname);
+
_state->action(_state->action_state, val, strlen(val));
}
}
* *strval, *lenval and *weight are filled in when return value is PT_VAL
*
*/
-typedef ts_tokentype (*ts_tokenizer)(TSQueryParserState state, int8 *operator,
- int *lenval, char **strval,
- int16 *weight, bool *prefix);
+typedef ts_tokentype (*ts_tokenizer) (TSQueryParserState state, int8 *operator,
+ int *lenval, char **strval,
+ int16 *weight, bool *prefix);
struct TSQueryParserStateData
{
static bool
parse_or_operator(TSQueryParserState pstate)
{
- char *ptr = pstate->buf;
+ char *ptr = pstate->buf;
if (pstate->in_quotes)
return false;
ptr += 2;
/*
- * it shouldn't be a part of any word but somewhere later it should be some
- * operand
+ * it shouldn't be a part of any word but somewhere later it should be
+ * some operand
*/
- if (*ptr == '\0') /* no operand */
+ if (*ptr == '\0') /* no operand */
return false;
/* it shouldn't be a part of any word */
- if (t_iseq(ptr, '-') || t_iseq(ptr, '_') || t_isalpha(ptr) || t_isdigit(ptr))
+ if (t_iseq(ptr, '-') || t_iseq(ptr, '_') || t_isalpha(ptr) || t_isdigit(ptr))
return false;
- for(;;)
+ for (;;)
{
ptr += pg_mblen(ptr);
- if (*ptr == '\0') /* got end of string without operand */
+ if (*ptr == '\0') /* got end of string without operand */
return false;
/*
- * Suppose, we found an operand, but could be a not correct operand. So
- * we still treat OR literal as operation with possibly incorrect
+ * Suppose, we found an operand, but could be a not correct operand.
+ * So we still treat OR literal as operation with possibly incorrect
* operand and will not search it as lexeme
*/
if (!t_isspace(ptr))
}
else if (!t_isspace(state->buf))
{
- /* We rely on the tsvector parser to parse the value for us */
+ /*
+ * We rely on the tsvector parser to parse the value for
+ * us
+ */
reset_tsvector_parser(state->valstate, state->buf);
if (gettoken_tsvector(state->valstate, strval, lenval,
NULL, NULL, &state->buf))
}
else if (!t_isspace(state->buf))
{
- /* We rely on the tsvector parser to parse the value for us */
+ /*
+ * We rely on the tsvector parser to parse the value for
+ * us
+ */
reset_tsvector_parser(state->valstate, state->buf);
if (gettoken_tsvector(state->valstate, strval, lenval,
NULL, NULL, &state->buf))
if (!state->in_quotes)
{
/*
- * put implicit AND after an operand
- * and handle this quote in WAITOPERAND
+ * put implicit AND after an operand and handle this
+ * quote in WAITOPERAND
*/
state->state = WAITOPERAND;
*operator = OP_AND;
ALLOCSET_SMALL_SIZES);
relation->rd_rulescxt = rulescxt;
MemoryContextCopyAndSetIdentifier(rulescxt,
- RelationGetRelationName(relation));
+ RelationGetRelationName(relation));
/*
* allocate an array to hold the rewrite rules (the array is extended if
ALLOCSET_SMALL_SIZES);
relation->rd_indexcxt = indexcxt;
MemoryContextCopyAndSetIdentifier(indexcxt,
- RelationGetRelationName(relation));
+ RelationGetRelationName(relation));
/*
* Now we can fetch the index AM's API struct
expensive, so we don't attempt it by default.
* 2. "recheck_on_update" index option explicitly set by user, which overrides 1)
*/
-static bool IsProjectionFunctionalIndex(Relation index, IndexInfo* ii)
+static bool
+IsProjectionFunctionalIndex(Relation index, IndexInfo *ii)
{
- bool is_projection = false;
+ bool is_projection = false;
if (ii->ii_Expressions)
{
- HeapTuple tuple;
- Datum reloptions;
- bool isnull;
- QualCost index_expr_cost;
+ HeapTuple tuple;
+ Datum reloptions;
+ bool isnull;
+ QualCost index_expr_cost;
/* by default functional index is considered as non-injective */
is_projection = true;
* inserting a new index entry for the changed value.
*/
if ((index_expr_cost.startup + index_expr_cost.per_tuple) >
- HEURISTIC_MAX_HOT_RECHECK_EXPR_COST)
+ HEURISTIC_MAX_HOT_RECHECK_EXPR_COST)
is_projection = false;
tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(RelationGetRelid(index)));
RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind)
{
Bitmapset *indexattrs; /* columns used in non-projection indexes */
- Bitmapset *projindexattrs; /* columns used in projection indexes */
+ Bitmapset *projindexattrs; /* columns used in projection indexes */
Bitmapset *uindexattrs; /* columns in unique indexes */
Bitmapset *pkindexattrs; /* columns in the primary index */
Bitmapset *idindexattrs; /* columns in the replica identity */
Oid relreplindex;
ListCell *l;
MemoryContext oldcxt;
- int indexno;
+ int indexno;
/* Quick exit if we already computed the result. */
if (relation->rd_indexattr != NULL)
ALLOCSET_SMALL_SIZES);
rel->rd_indexcxt = indexcxt;
MemoryContextCopyAndSetIdentifier(indexcxt,
- RelationGetRelationName(rel));
+ RelationGetRelationName(rel));
/*
* Now we can fetch the index AM's API struct. (We can't store
static CFuncHashTabEntry *lookup_C_func(HeapTuple procedureTuple);
static void record_C_func(HeapTuple procedureTuple,
PGFunction user_fn, const Pg_finfo_record *inforec);
+
/* extern so it's callable via JIT */
extern Datum fmgr_security_definer(PG_FUNCTION_ARGS);
!heap_attisnull(procedureTuple, Anum_pg_proc_proconfig, NULL) ||
FmgrHookIsNeeded(functionId))
{
- *mod = NULL; /* core binary */
+ *mod = NULL; /* core binary */
*fn = pstrdup("fmgr_security_definer");
ReleaseSysCache(procedureTuple);
return;
if (isnull)
elog(ERROR, "null prosrc");
- *mod = NULL; /* core binary */
+ *mod = NULL; /* core binary */
*fn = TextDatumGetCString(prosrcattr);
break;
break;
case SQLlanguageId:
- *mod = NULL; /* core binary */
+ *mod = NULL; /* core binary */
*fn = pstrdup("fmgr_sql");
break;
default:
*mod = NULL;
- *fn = NULL; /* unknown, pass pointer */
+ *fn = NULL; /* unknown, pass pointer */
break;
}
},
&jit_debugging_support,
false,
+
/*
* This is not guaranteed to be available, but given it's a developer
* oriented option, it doesn't seem worth adding code checking
},
&jit_profiling_support,
false,
+
/*
* This is not guaranteed to be available, but given it's a developer
* oriented option, it doesn't seem worth adding code checking
Assert(TopPortalContext == NULL);
TopPortalContext = AllocSetContextCreate(TopMemoryContext,
- "TopPortalContext",
- ALLOCSET_DEFAULT_SIZES);
+ "TopPortalContext",
+ ALLOCSET_DEFAULT_SIZES);
ctl.keysize = MAX_PORTALNAME_LEN;
ctl.entrysize = sizeof(PortalHashEnt);
HoldPortal(Portal portal)
{
/*
- * Note that PersistHoldablePortal() must release all resources
- * used by the portal that are local to the creating transaction.
+ * Note that PersistHoldablePortal() must release all resources used by
+ * the portal that are local to the creating transaction.
*/
PortalCreateHoldStore(portal);
PersistHoldablePortal(portal);
PortalReleaseCachedPlan(portal);
/*
- * Any resources belonging to the portal will be released in the
- * upcoming transaction-wide cleanup; the portal will no longer
- * have its own resources.
+ * Any resources belonging to the portal will be released in the upcoming
+ * transaction-wide cleanup; the portal will no longer have its own
+ * resources.
*/
portal->resowner = NULL;
/*
- * Having successfully exported the holdable cursor, mark it as
- * not belonging to this transaction.
+ * Having successfully exported the holdable cursor, mark it as not
+ * belonging to this transaction.
*/
portal->createSubid = InvalidSubTransactionId;
portal->activeSubid = InvalidSubTransactionId;
{
/*
* Doing transaction control, especially abort, inside a cursor
- * loop that is not read-only, for example using UPDATE
- * ... RETURNING, has weird semantics issues. Also, this
+ * loop that is not read-only, for example using UPDATE ...
+ * RETURNING, has weird semantics issues. Also, this
* implementation wouldn't work, because such portals cannot be
* held. (The core grammar enforces that only SELECT statements
* can drive a cursor, but for example PL/pgSQL does not restrict
int ntuples; /* Number of tuples in this chunk. */
int overflow; /* If overflow, how many including this one? */
char data[FLEXIBLE_ARRAY_MEMBER];
-} SharedTuplestoreChunk;
+} SharedTuplestoreChunk;
/* Per-participant shared state. */
typedef struct SharedTuplestoreParticipant
BlockNumber read_page; /* Page number for next read. */
BlockNumber npages; /* Number of pages written. */
bool writing; /* Used only for assertions. */
-} SharedTuplestoreParticipant;
+} SharedTuplestoreParticipant;
/* The control object that lives in shared memory. */
struct SharedTuplestore
/*
* Set always-secure search path, so malicious users can't get control.
- * The capacity to run normal SQL queries was added in PostgreSQL
- * 10, so the search path cannot be changed (by us or attackers) on
- * earlier versions.
+ * The capacity to run normal SQL queries was added in PostgreSQL 10, so
+ * the search path cannot be changed (by us or attackers) on earlier
+ * versions.
*/
if (dbname != NULL && PQserverVersion(tmpconn) >= 100000)
{
static PTOKEN_PRIVILEGES
GetPrivilegesToDelete(HANDLE hToken)
{
- int i, j;
+ int i,
+ j;
DWORD length;
PTOKEN_PRIVILEGES tokenPrivs;
LUID luidLockPages;
findParentsByOid(&tblinfo[i], inhinfo, numInherits);
/*
- * If needed, mark the parents as interesting for getTableAttrs
- * and getIndexes.
+ * If needed, mark the parents as interesting for getTableAttrs and
+ * getIndexes.
*/
if (mark_parents)
{
static void
flagInhIndexes(Archive *fout, TableInfo tblinfo[], int numTables)
{
- int i,
- j,
- k;
+ int i,
+ j,
+ k;
DumpableObject ***parentIndexArray;
parentIndexArray = (DumpableObject ***)
for (i = 0; i < numTables; i++)
{
- TableInfo *parenttbl;
+ TableInfo *parenttbl;
IndexAttachInfo *attachinfo;
if (!tblinfo[i].ispartition || tblinfo[i].numParents == 0)
/*
* We want dependencies from parent to partition (so that the
- * partition index is created first), and another one from
- * attach object to parent (so that the partition index is
- * attached once the parent index has been created).
+ * partition index is created first), and another one from attach
+ * object to parent (so that the partition index is attached once
+ * the parent index has been created).
*/
addObjectDependency(&parentidx->dobj, index->dobj.dumpId);
addObjectDependency(&attachinfo[k].dobj, parentidx->dobj.dumpId);
}
else if (coninfo->contype == 'f')
{
- char *only;
+ char *only;
/*
- * Foreign keys on partitioned tables are always declared as inheriting
- * to partitions; for all other cases, emit them as applying ONLY
- * directly to the named table, because that's how they work for
- * regular inherited tables.
+ * Foreign keys on partitioned tables are always declared as
+ * inheriting to partitions; for all other cases, emit them as
+ * applying ONLY directly to the named table, because that's how they
+ * work for regular inherited tables.
*/
only = tbinfo->relkind == RELKIND_PARTITIONED_TABLE ? "" : "ONLY ";
* when stats_temp_directory is set because PGSS_TEXT_FILE is always
* created there.
*/
- "pg_stat_tmp", /* defined as PG_STAT_TMP_DIR */
+ "pg_stat_tmp", /* defined as PG_STAT_TMP_DIR */
/*
* It is generally not useful to backup the contents of this directory
"pg_replslot",
/* Contents removed on startup, see dsm_cleanup_for_mmap(). */
- "pg_dynshmem", /* defined as PG_DYNSHMEM_DIR */
+ "pg_dynshmem", /* defined as PG_DYNSHMEM_DIR */
/* Contents removed on startup, see AsyncShmemInit(). */
"pg_notify",
static bool
check_file_excluded(const char *path, const char *type)
{
- char localpath[MAXPGPATH];
- int excludeIdx;
- const char *filename;
+ char localpath[MAXPGPATH];
+ int excludeIdx;
+ const char *filename;
/* check individual files... */
for (excludeIdx = 0; excludeFiles[excludeIdx] != NULL; excludeIdx++)
/*
* The sscanf tests above can match files that have extra characters at
* the end. To eliminate such cases, cross-check that GetRelationPath
- * creates the exact same filename, when passed the RelFileNode information
- * we extracted from the filename.
+ * creates the exact same filename, when passed the RelFileNode
+ * information we extracted from the filename.
*/
if (matched)
{
pg_log(PG_VERBOSE, "%s\n", cmd);
#ifdef WIN32
+
/*
* For some reason, Windows issues a file-in-use error if we write data to
* the log file from a non-primary thread just before we create a
}
#ifndef WIN32
+
/*
* We can't do this on Windows because it will keep the "pg_ctl start"
* output filename open until the server stops, so we do the \n\n above on
/*
* If pg_ctl failed, and the connection didn't fail, and
- * report_and_exit_on_error is enabled, fail now. This
- * could happen if the server was already running.
+ * report_and_exit_on_error is enabled, fail now. This could happen if
+ * the server was already running.
*/
if (!pg_ctl_return)
{
char *index_tablespace = NULL;
/* random seed used when calling srandom() */
-int64 random_seed = -1;
+int64 random_seed = -1;
/*
* end of configurable parameters
/* set harmonicn and other parameters to cache cell */
static void
-zipfSetCacheCell(ZipfCell * cell, int64 n, double s)
+zipfSetCacheCell(ZipfCell *cell, int64 n, double s)
{
double harmonic2;
* and create new cell if it does not exist
*/
static ZipfCell *
-zipfFindOrCreateCacheCell(ZipfCache * cache, int64 n, double s)
+zipfFindOrCreateCacheCell(ZipfCache *cache, int64 n, double s)
{
int i,
least_recently_used = 0;
static int64
getHashFnv1a(int64 val, uint64 seed)
{
- int64 result;
- int i;
+ int64 result;
+ int i;
result = FNV_OFFSET_BASIS ^ seed;
for (i = 0; i < 8; ++i)
{
- int32 octet = val & 0xff;
+ int32 octet = val & 0xff;
val = val >> 8;
result = result ^ octet;
static int64
getHashMurmur2(int64 val, uint64 seed)
{
- uint64 result = seed ^ (sizeof(int64) * MM2_MUL);
- uint64 k = (uint64) val;
+ uint64 result = seed ^ (sizeof(int64) * MM2_MUL);
+ uint64 k = (uint64) val;
k *= MM2_MUL;
k ^= k >> MM2_ROT;
else if (var->value.type == PGBT_DOUBLE)
snprintf(stringform, sizeof(stringform),
"%.*g", DBL_DIG, var->value.u.dval);
- else /* internal error, unexpected type */
+ else /* internal error, unexpected type */
Assert(0);
var->svalue = pg_strdup(stringform);
return var->svalue;
static bool
makeVariableValue(Variable *var)
{
- size_t slen;
+ size_t slen;
if (var->value.type != PGBT_NO_VALUE)
return true; /* no work */
{
setNullValue(&var->value);
}
+
/*
- * accept prefixes such as y, ye, n, no... but not for "o".
- * 0/1 are recognized later as an int, which is converted
- * to bool if needed.
+ * accept prefixes such as y, ye, n, no... but not for "o". 0/1 are
+ * recognized later as an int, which is converted to bool if needed.
*/
else if (pg_strncasecmp(var->svalue, "true", slen) == 0 ||
pg_strncasecmp(var->svalue, "yes", slen) == 0 ||
/* Returns false on failure (bad name) */
static bool
putVariableValue(CState *st, const char *context, char *name,
- const PgBenchValue *value)
+ const PgBenchValue *value)
{
Variable *var;
*bval = pval->u.bval;
return true;
}
- else /* NULL, INT or DOUBLE */
+ else /* NULL, INT or DOUBLE */
{
fprintf(stderr, "cannot coerce %s to boolean\n", valueTypeName(pval));
*bval = false; /* suppress uninitialized-variable warnings */
*ival = (int64) dval;
return true;
}
- else /* BOOLEAN or NULL */
+ else /* BOOLEAN or NULL */
{
fprintf(stderr, "cannot coerce %s to int\n", valueTypeName(pval));
return false;
*dval = (double) pval->u.ival;
return true;
}
- else /* BOOLEAN or NULL */
+ else /* BOOLEAN or NULL */
{
fprintf(stderr, "cannot coerce %s to double\n", valueTypeName(pval));
return false;
pv->u.dval = dval;
}
-static bool isLazyFunc(PgBenchFunction func)
+static bool
+isLazyFunc(PgBenchFunction func)
{
return func == PGBENCH_AND || func == PGBENCH_OR || func == PGBENCH_CASE;
}
evalLazyFunc(TState *thread, CState *st,
PgBenchFunction func, PgBenchExprLink *args, PgBenchValue *retval)
{
- PgBenchValue a1, a2;
- bool ba1, ba2;
+ PgBenchValue a1,
+ a2;
+ bool ba1,
+ ba2;
Assert(isLazyFunc(func) && args != NULL && args->next != NULL);
switch (func)
{
- case PGBENCH_AND:
- if (a1.type == PGBT_NULL)
- {
- setNullValue(retval);
- return true;
- }
+ case PGBENCH_AND:
+ if (a1.type == PGBT_NULL)
+ {
+ setNullValue(retval);
+ return true;
+ }
- if (!coerceToBool(&a1, &ba1))
- return false;
+ if (!coerceToBool(&a1, &ba1))
+ return false;
- if (!ba1)
- {
- setBoolValue(retval, false);
- return true;
- }
+ if (!ba1)
+ {
+ setBoolValue(retval, false);
+ return true;
+ }
- if (!evaluateExpr(thread, st, args->expr, &a2))
- return false;
+ if (!evaluateExpr(thread, st, args->expr, &a2))
+ return false;
- if (a2.type == PGBT_NULL)
- {
- setNullValue(retval);
- return true;
- }
- else if (!coerceToBool(&a2, &ba2))
- return false;
- else
- {
- setBoolValue(retval, ba2);
- return true;
- }
+ if (a2.type == PGBT_NULL)
+ {
+ setNullValue(retval);
+ return true;
+ }
+ else if (!coerceToBool(&a2, &ba2))
+ return false;
+ else
+ {
+ setBoolValue(retval, ba2);
+ return true;
+ }
- return true;
+ return true;
- case PGBENCH_OR:
+ case PGBENCH_OR:
- if (a1.type == PGBT_NULL)
- {
- setNullValue(retval);
- return true;
- }
+ if (a1.type == PGBT_NULL)
+ {
+ setNullValue(retval);
+ return true;
+ }
- if (!coerceToBool(&a1, &ba1))
- return false;
+ if (!coerceToBool(&a1, &ba1))
+ return false;
- if (ba1)
- {
- setBoolValue(retval, true);
- return true;
- }
+ if (ba1)
+ {
+ setBoolValue(retval, true);
+ return true;
+ }
- if (!evaluateExpr(thread, st, args->expr, &a2))
- return false;
+ if (!evaluateExpr(thread, st, args->expr, &a2))
+ return false;
- if (a2.type == PGBT_NULL)
- {
- setNullValue(retval);
- return true;
- }
- else if (!coerceToBool(&a2, &ba2))
- return false;
- else
- {
- setBoolValue(retval, ba2);
- return true;
- }
+ if (a2.type == PGBT_NULL)
+ {
+ setNullValue(retval);
+ return true;
+ }
+ else if (!coerceToBool(&a2, &ba2))
+ return false;
+ else
+ {
+ setBoolValue(retval, ba2);
+ return true;
+ }
- case PGBENCH_CASE:
- /* when true, execute branch */
- if (valueTruth(&a1))
- return evaluateExpr(thread, st, args->expr, retval);
+ case PGBENCH_CASE:
+ /* when true, execute branch */
+ if (valueTruth(&a1))
+ return evaluateExpr(thread, st, args->expr, retval);
- /* now args contains next condition or final else expression */
- args = args->next;
+ /* now args contains next condition or final else expression */
+ args = args->next;
- /* final else case? */
- if (args->next == NULL)
- return evaluateExpr(thread, st, args->expr, retval);
+ /* final else case? */
+ if (args->next == NULL)
+ return evaluateExpr(thread, st, args->expr, retval);
- /* no, another when, proceed */
- return evalLazyFunc(thread, st, PGBENCH_CASE, args, retval);
+ /* no, another when, proceed */
+ return evalLazyFunc(thread, st, PGBENCH_CASE, args, retval);
- default:
- /* internal error, cannot get here */
- Assert(0);
- break;
+ default:
+ /* internal error, cannot get here */
+ Assert(0);
+ break;
}
return false;
}
PgBenchValue *retval)
{
/* evaluate all function arguments */
- int nargs = 0;
- PgBenchValue vargs[MAX_FARGS];
+ int nargs = 0;
+ PgBenchValue vargs[MAX_FARGS];
PgBenchExprLink *l = args;
- bool has_null = false;
+ bool has_null = false;
for (nargs = 0; nargs < MAX_FARGS && l != NULL; nargs++, l = l->next)
{
case PGBENCH_LSHIFT:
case PGBENCH_RSHIFT:
{
- int64 li, ri;
+ int64 li,
+ ri;
if (!coerceToInt(&vargs[0], &li) || !coerceToInt(&vargs[1], &ri))
return false;
setIntValue(retval, li << ri);
else if (func == PGBENCH_RSHIFT)
setIntValue(retval, li >> ri);
- else /* cannot get here */
+ else /* cannot get here */
Assert(0);
return true;
/* logical operators */
case PGBENCH_NOT:
{
- bool b;
+ bool b;
+
if (!coerceToBool(&vargs[0], &b))
return false;
fprintf(stderr, "int " INT64_FORMAT "\n", varg->u.ival);
else if (varg->type == PGBT_DOUBLE)
fprintf(stderr, "double %.*g\n", DBL_DIG, varg->u.dval);
- else /* internal error, unexpected type */
+ else /* internal error, unexpected type */
Assert(0);
*retval = *varg;
case PGBENCH_IS:
{
Assert(nargs == 2);
- /* note: this simple implementation is more permissive than SQL */
+
+ /*
+ * note: this simple implementation is more permissive than
+ * SQL
+ */
setBoolValue(retval,
vargs[0].type == vargs[1].type &&
vargs[0].u.bval == vargs[1].u.bval);
case PGBENCH_HASH_FNV1A:
case PGBENCH_HASH_MURMUR2:
{
- int64 val,
- seed;
+ int64 val,
+ seed;
Assert(nargs == 2);
if (command->meta == META_ELIF &&
conditional_stack_peek(st->cstack) == IFSTATE_TRUE)
{
- /* elif after executed block, skip eval and wait for endif */
+ /*
+ * elif after executed block, skip eval and wait
+ * for endif
+ */
conditional_stack_poke(st->cstack, IFSTATE_IGNORED);
goto move_to_end_command;
}
break;
}
}
- else /* if and elif evaluated cases */
+ else /* if and elif evaluated cases */
{
- bool cond = valueTruth(&result);
+ bool cond = valueTruth(&result);
/* execute or not depending on evaluated condition */
if (command->meta == META_IF)
{
conditional_stack_push(st->cstack, cond ? IFSTATE_TRUE : IFSTATE_FALSE);
}
- else /* elif */
+ else /* elif */
{
- /* we should get here only if the "elif" needed evaluation */
+ /*
+ * we should get here only if the "elif"
+ * needed evaluation
+ */
Assert(conditional_stack_peek(st->cstack) == IFSTATE_FALSE);
conditional_stack_poke(st->cstack, cond ? IFSTATE_TRUE : IFSTATE_FALSE);
}
conditional_stack_poke(st->cstack, IFSTATE_ELSE_FALSE);
break;
case IFSTATE_FALSE: /* inconsistent if active */
- case IFSTATE_IGNORED: /* inconsistent if active */
- case IFSTATE_NONE: /* else without if */
+ case IFSTATE_IGNORED: /* inconsistent if active */
+ case IFSTATE_NONE: /* else without if */
case IFSTATE_ELSE_TRUE: /* else after else */
- case IFSTATE_ELSE_FALSE: /* else after else */
+ case IFSTATE_ELSE_FALSE: /* else after else */
default:
/* dead code if conditional check is ok */
Assert(false);
}
}
- move_to_end_command:
+ move_to_end_command:
+
/*
- * executing the expression or shell command might
- * take a non-negligible amount of time, so reset
- * 'now'
+ * executing the expression or shell command might take a
+ * non-negligible amount of time, so reset 'now'
*/
INSTR_TIME_SET_ZERO(now);
/* cannot reach end of script in that state */
Assert(command != NULL);
- /* if this is conditional related, update conditional state */
+ /*
+ * if this is conditional related, update conditional
+ * state
+ */
if (command->type == META_COMMAND &&
(command->meta == META_IF ||
command->meta == META_ELIF ||
{
switch (conditional_stack_peek(st->cstack))
{
- case IFSTATE_FALSE:
- if (command->meta == META_IF || command->meta == META_ELIF)
- {
- /* we must evaluate the condition */
- st->state = CSTATE_START_COMMAND;
- }
- else if (command->meta == META_ELSE)
- {
- /* we must execute next command */
- conditional_stack_poke(st->cstack, IFSTATE_ELSE_TRUE);
- st->state = CSTATE_START_COMMAND;
- st->command++;
- }
- else if (command->meta == META_ENDIF)
- {
- Assert(!conditional_stack_empty(st->cstack));
- conditional_stack_pop(st->cstack);
- if (conditional_active(st->cstack))
+ case IFSTATE_FALSE:
+ if (command->meta == META_IF || command->meta == META_ELIF)
+ {
+ /* we must evaluate the condition */
+ st->state = CSTATE_START_COMMAND;
+ }
+ else if (command->meta == META_ELSE)
+ {
+ /* we must execute next command */
+ conditional_stack_poke(st->cstack, IFSTATE_ELSE_TRUE);
st->state = CSTATE_START_COMMAND;
- /* else state remains in CSTATE_SKIP_COMMAND */
+ st->command++;
+ }
+ else if (command->meta == META_ENDIF)
+ {
+ Assert(!conditional_stack_empty(st->cstack));
+ conditional_stack_pop(st->cstack);
+ if (conditional_active(st->cstack))
+ st->state = CSTATE_START_COMMAND;
+
+ /*
+ * else state remains in
+ * CSTATE_SKIP_COMMAND
+ */
+ st->command++;
+ }
+ break;
+
+ case IFSTATE_IGNORED:
+ case IFSTATE_ELSE_FALSE:
+ if (command->meta == META_IF)
+ conditional_stack_push(st->cstack, IFSTATE_IGNORED);
+ else if (command->meta == META_ENDIF)
+ {
+ Assert(!conditional_stack_empty(st->cstack));
+ conditional_stack_pop(st->cstack);
+ if (conditional_active(st->cstack))
+ st->state = CSTATE_START_COMMAND;
+ }
+ /* could detect "else" & "elif" after "else" */
st->command++;
- }
- break;
+ break;
- case IFSTATE_IGNORED:
- case IFSTATE_ELSE_FALSE:
- if (command->meta == META_IF)
- conditional_stack_push(st->cstack, IFSTATE_IGNORED);
- else if (command->meta == META_ENDIF)
- {
- Assert(!conditional_stack_empty(st->cstack));
- conditional_stack_pop(st->cstack);
- if (conditional_active(st->cstack))
- st->state = CSTATE_START_COMMAND;
- }
- /* could detect "else" & "elif" after "else" */
- st->command++;
- break;
+ case IFSTATE_NONE:
+ case IFSTATE_TRUE:
+ case IFSTATE_ELSE_TRUE:
+ default:
- case IFSTATE_NONE:
- case IFSTATE_TRUE:
- case IFSTATE_ELSE_TRUE:
- default:
- /* inconsistent if inactive, unreachable dead code */
- Assert(false);
+ /*
+ * inconsistent if inactive, unreachable dead
+ * code
+ */
+ Assert(false);
}
}
else
{
/* statically check conditional structure */
ConditionalStack cs = conditional_stack_create();
- int i;
- for (i = 0 ; ps.commands[i] != NULL ; i++)
+ int i;
+
+ for (i = 0; ps.commands[i] != NULL; i++)
{
- Command *cmd = ps.commands[i];
+ Command *cmd = ps.commands[i];
+
if (cmd->type == META_COMMAND)
{
switch (cmd->meta)
{
- case META_IF:
- conditional_stack_push(cs, IFSTATE_FALSE);
- break;
- case META_ELIF:
- if (conditional_stack_empty(cs))
- ConditionError(ps.desc, i+1, "\\elif without matching \\if");
- if (conditional_stack_peek(cs) == IFSTATE_ELSE_FALSE)
- ConditionError(ps.desc, i+1, "\\elif after \\else");
- break;
- case META_ELSE:
- if (conditional_stack_empty(cs))
- ConditionError(ps.desc, i+1, "\\else without matching \\if");
- if (conditional_stack_peek(cs) == IFSTATE_ELSE_FALSE)
- ConditionError(ps.desc, i+1, "\\else after \\else");
- conditional_stack_poke(cs, IFSTATE_ELSE_FALSE);
- break;
- case META_ENDIF:
- if (!conditional_stack_pop(cs))
- ConditionError(ps.desc, i+1, "\\endif without matching \\if");
- break;
- default:
- /* ignore anything else... */
- break;
+ case META_IF:
+ conditional_stack_push(cs, IFSTATE_FALSE);
+ break;
+ case META_ELIF:
+ if (conditional_stack_empty(cs))
+ ConditionError(ps.desc, i + 1, "\\elif without matching \\if");
+ if (conditional_stack_peek(cs) == IFSTATE_ELSE_FALSE)
+ ConditionError(ps.desc, i + 1, "\\elif after \\else");
+ break;
+ case META_ELSE:
+ if (conditional_stack_empty(cs))
+ ConditionError(ps.desc, i + 1, "\\else without matching \\if");
+ if (conditional_stack_peek(cs) == IFSTATE_ELSE_FALSE)
+ ConditionError(ps.desc, i + 1, "\\else after \\else");
+ conditional_stack_poke(cs, IFSTATE_ELSE_FALSE);
+ break;
+ case META_ENDIF:
+ if (!conditional_stack_pop(cs))
+ ConditionError(ps.desc, i + 1, "\\endif without matching \\if");
+ break;
+ default:
+ /* ignore anything else... */
+ break;
}
}
}
if (!conditional_stack_empty(cs))
- ConditionError(ps.desc, i+1, "\\if without matching \\endif");
+ ConditionError(ps.desc, i + 1, "\\if without matching \\endif");
conditional_stack_destroy(cs);
}
{
/* rely on current time */
instr_time now;
+
INSTR_TIME_SET_CURRENT(now);
iseed = (unsigned int) INSTR_TIME_GET_MICROSEC(now);
}
else
{
/* parse seed unsigned int value */
- char garbage;
+ char garbage;
+
if (sscanf(seed, "%u%c", &iseed, &garbage) != 1)
{
fprintf(stderr,
if (var->value.type != PGBT_NO_VALUE)
{
if (!putVariableValue(&state[i], "startup",
- var->name, &var->value))
+ var->name, &var->value))
exit(1);
}
else
/* set default seed for hash functions */
if (lookupVariable(&state[0], "default_seed") == NULL)
{
- uint64 seed = ((uint64) (random() & 0xFFFF) << 48) |
- ((uint64) (random() & 0xFFFF) << 32) |
- ((uint64) (random() & 0xFFFF) << 16) |
- (uint64) (random() & 0xFFFF);
+ uint64 seed = ((uint64) (random() & 0xFFFF) << 48) |
+ ((uint64) (random() & 0xFFFF) << 32) |
+ ((uint64) (random() & 0xFFFF) << 16) |
+ (uint64) (random() & 0xFFFF);
for (i = 0; i < nclients; i++)
if (!putVariableInt(&state[i], "startup", "default_seed", (int64) seed))
/*
* Commands not allowed within transactions. The statements checked for
- * here should be exactly those that call PreventInTransactionBlock() in the
- * backend.
+ * here should be exactly those that call PreventInTransactionBlock() in
+ * the backend.
*/
if (wordlen == 6 && pg_strncasecmp(query, "vacuum", 6) == 0)
return true;
/*
* If we found a command word, check whether the rest of the line
* contains only whitespace plus maybe one semicolon. If not,
- * ignore the command word after all. These commands are only
- * for compatibility with other SQL clients and are not
- * documented.
+ * ignore the command word after all. These commands are only for
+ * compatibility with other SQL clients and are not documented.
*/
if (rest_of_line != NULL)
{
}
/*
- * If they typed "\q" in a place where "\q" is not active,
- * supply a hint. The text is still added to the query
- * buffer.
+ * If they typed "\q" in a place where "\q" is not active, supply
+ * a hint. The text is still added to the query buffer.
*/
if (found_q && query_buf->len != 0 &&
prompt_status != PROMPT_READY &&
prompt_status != PROMPT_CONTINUE &&
prompt_status != PROMPT_PAREN)
#ifndef WIN32
- puts(_("Use control-D to quit."));
+ puts(_("Use control-D to quit."));
#else
- puts(_("Use control-C to quit."));
+ puts(_("Use control-C to quit."));
#endif
}
/* ALTER INDEX <foo> SET|RESET ( */
else if (Matches5("ALTER", "INDEX", MatchAny, "RESET", "("))
COMPLETE_WITH_LIST7("fillfactor", "recheck_on_update",
- "fastupdate", "gin_pending_list_limit", /* GIN */
+ "fastupdate", "gin_pending_list_limit", /* GIN */
"buffering", /* GiST */
"pages_per_range", "autosummarize" /* BRIN */
);
else if (Matches5("ALTER", "INDEX", MatchAny, "SET", "("))
COMPLETE_WITH_LIST7("fillfactor =", "recheck_on_update =",
- "fastupdate =", "gin_pending_list_limit =", /* GIN */
+ "fastupdate =", "gin_pending_list_limit =", /* GIN */
"buffering =", /* GiST */
"pages_per_range =", "autosummarize =" /* BRIN */
);
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_indexes,
" UNION SELECT 'ON'"
" UNION SELECT 'CONCURRENTLY'");
+
/*
* Complete ... INDEX|CONCURRENTLY [<name>] ON with a list of relations
* that can indexes can be created on
/* Complete SET <var> with "TO" */
else if (Matches2("SET", MatchAny))
COMPLETE_WITH_CONST("TO");
- /* Complete ALTER DATABASE|FUNCTION||PROCEDURE|ROLE|ROUTINE|USER ... SET <name> */
+
+ /*
+ * Complete ALTER DATABASE|FUNCTION||PROCEDURE|ROLE|ROUTINE|USER ... SET
+ * <name>
+ */
else if (HeadMatches2("ALTER", "DATABASE|FUNCTION|PROCEDURE|ROLE|ROUTINE|USER") &&
TailMatches2("SET", MatchAny))
COMPLETE_WITH_LIST2("FROM CURRENT", "TO");
return -1;
else
{
- IfStackElem *p = cstack->head;
+ IfStackElem *p = cstack->head;
int depth = 0;
+
while (p != NULL)
{
depth++;
extern Datum gintuple_get_key(GinState *ginstate, IndexTuple tuple,
GinNullCategory *category);
extern void GinCheckForSerializableConflictIn(Relation relation,
- HeapTuple tuple, Buffer buffer);
+ HeapTuple tuple, Buffer buffer);
/* gininsert.c */
extern IndexBuildResult *ginbuild(Relation heap, Relation index,
Oid dbId;
uint32 nrelids;
uint8 flags;
- Oid relids[FLEXIBLE_ARRAY_MEMBER];
+ Oid relids[FLEXIBLE_ARRAY_MEMBER];
} xl_heap_truncate;
#define SizeOfHeapTruncate (offsetof(xl_heap_truncate, relids))
BlockNumber btm_fastroot; /* current "fast" root location */
uint32 btm_fastlevel; /* tree level of the "fast" root page */
/* following fields are available since page version 3 */
- TransactionId btm_oldest_btpo_xact; /* oldest btpo_xact among of
- * deleted pages */
- float8 btm_last_cleanup_num_heap_tuples; /* number of heap tuples
- * during last cleanup */
+	TransactionId btm_oldest_btpo_xact;	/* oldest btpo_xact among all deleted
+ * pages */
+ float8 btm_last_cleanup_num_heap_tuples; /* number of heap tuples
+ * during last cleanup */
} BTMetaPageData;
#define BTPageGetMeta(p) \
#define BTREE_METAPAGE 0 /* first page is meta */
#define BTREE_MAGIC 0x053162 /* magic number of btree pages */
#define BTREE_VERSION 3 /* current version number */
-#define BTREE_MIN_VERSION 2 /* minimal supported version number */
+#define BTREE_MIN_VERSION 2 /* minimal supported version number */
/*
* Maximum size of a btree index entry, including its tuple header.
*/
extern void _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level);
extern void _bt_update_meta_cleanup_info(Relation rel,
- TransactionId oldestBtpoXact, float8 numHeapTuples);
+ TransactionId oldestBtpoXact, float8 numHeapTuples);
extern void _bt_upgrademetapage(Page page);
extern Buffer _bt_getroot(Relation rel, int access);
extern Buffer _bt_gettrueroot(Relation rel);
RELOPT_KIND_PARTITIONED = (1 << 11),
/* if you add a new kind, make sure you update "last_default" too */
RELOPT_KIND_LAST_DEFAULT = RELOPT_KIND_PARTITIONED,
- RELOPT_KIND_INDEX = RELOPT_KIND_BTREE|RELOPT_KIND_HASH|RELOPT_KIND_GIN|RELOPT_KIND_SPGIST,
+ RELOPT_KIND_INDEX = RELOPT_KIND_BTREE | RELOPT_KIND_HASH | RELOPT_KIND_GIN | RELOPT_KIND_SPGIST,
/* some compilers treat enums as signed ints, so we can't use 1 << 31 */
RELOPT_KIND_MAX = (1 << 30)
} relopt_kind;
* workers so far. */
bool phs_snapshot_any; /* SnapshotAny, not phs_snapshot_data? */
char phs_snapshot_data[FLEXIBLE_ARRAY_MEMBER];
-} ParallelHeapScanDescData;
+} ParallelHeapScanDescData;
typedef struct HeapScanDescData
{
spgConfigOut config; /* filled in by opclass config method */
SpGistTypeDesc attType; /* type of values to be indexed/restored */
- SpGistTypeDesc attLeafType; /* type of leaf-tuple values */
+ SpGistTypeDesc attLeafType; /* type of leaf-tuple values */
SpGistTypeDesc attPrefixType; /* type of inner-tuple prefix values */
SpGistTypeDesc attLabelType; /* type of node label values */
spgConfigOut config; /* filled in by opclass config method */
SpGistTypeDesc attType; /* type of values to be indexed/restored */
- SpGistTypeDesc attLeafType; /* type of leaf-tuple values */
+ SpGistTypeDesc attLeafType; /* type of leaf-tuple values */
SpGistTypeDesc attPrefixType; /* type of inner-tuple prefix values */
SpGistTypeDesc attLabelType; /* type of node label values */
extern TransactionId PrescanPreparedTransactions(TransactionId **xids_p,
int *nxids_p);
extern void ParsePrepareRecord(uint8 info, char *xlrec,
- xl_xact_parsed_prepare *parsed);
+ xl_xact_parsed_prepare *parsed);
extern void StandbyRecoverPreparedTransactions(void);
extern void RecoverPreparedTransactions(void);
SharedInvalidationMessage *msgs;
TransactionId twophase_xid; /* only for 2PC */
- char twophase_gid[GIDSIZE]; /* only for 2PC */
+ char twophase_gid[GIDSIZE]; /* only for 2PC */
int nabortrels; /* only for 2PC */
RelFileNode *abortnodes; /* only for 2PC */
RelFileNode *xnodes;
TransactionId twophase_xid; /* only for 2PC */
- char twophase_gid[GIDSIZE]; /* only for 2PC */
+ char twophase_gid[GIDSIZE]; /* only for 2PC */
XLogRecPtr origin_lsn;
TimestampTz origin_timestamp;
bool relispopulated; /* matview currently holds query results */
char relreplident; /* see REPLICA_IDENTITY_xxx constants */
bool relispartition; /* is relation a partition? */
- Oid relrewrite; /* heap for rewrite during DDL, link to original rel */
+ Oid relrewrite; /* heap for rewrite during DDL, link to
+ * original rel */
TransactionId relfrozenxid; /* all Xids < this are frozen in this rel */
TransactionId relminmxid; /* all multixacts in this rel are >= this.
* this is really a MultiXactId */
extern void ExecuteTruncate(TruncateStmt *stmt);
extern void ExecuteTruncateGuts(List *explicit_rels, List *relids, List *relids_logged,
- DropBehavior behavior, bool restart_seqs);
+ DropBehavior behavior, bool restart_seqs);
extern void SetRelationHasSubclass(Oid relationId, bool relhassubclass);
if (res > PG_INT16_MAX || res < PG_INT16_MIN)
{
- *result = 0x5EED; /* to avoid spurious warnings */
+ *result = 0x5EED; /* to avoid spurious warnings */
return true;
}
*result = (int16) res;
if (res > PG_INT16_MAX || res < PG_INT16_MIN)
{
- *result = 0x5EED; /* to avoid spurious warnings */
+ *result = 0x5EED; /* to avoid spurious warnings */
return true;
}
*result = (int16) res;
if (res > PG_INT16_MAX || res < PG_INT16_MIN)
{
- *result = 0x5EED; /* to avoid spurious warnings */
+ *result = 0x5EED; /* to avoid spurious warnings */
return true;
}
*result = (int16) res;
if (res > PG_INT32_MAX || res < PG_INT32_MIN)
{
- *result = 0x5EED; /* to avoid spurious warnings */
+ *result = 0x5EED; /* to avoid spurious warnings */
return true;
}
*result = (int32) res;
if (res > PG_INT32_MAX || res < PG_INT32_MIN)
{
- *result = 0x5EED; /* to avoid spurious warnings */
+ *result = 0x5EED; /* to avoid spurious warnings */
return true;
}
*result = (int32) res;
if (res > PG_INT32_MAX || res < PG_INT32_MIN)
{
- *result = 0x5EED; /* to avoid spurious warnings */
+ *result = 0x5EED; /* to avoid spurious warnings */
return true;
}
*result = (int32) res;
if (res > PG_INT64_MAX || res < PG_INT64_MIN)
{
- *result = 0x5EED; /* to avoid spurious warnings */
+ *result = 0x5EED; /* to avoid spurious warnings */
return true;
}
*result = (int64) res;
if ((a > 0 && b > 0 && a > PG_INT64_MAX - b) ||
(a < 0 && b < 0 && a < PG_INT64_MIN - b))
{
- *result = 0x5EED; /* to avoid spurious warnings */
+ *result = 0x5EED; /* to avoid spurious warnings */
return true;
}
*result = a + b;
if (res > PG_INT64_MAX || res < PG_INT64_MIN)
{
- *result = 0x5EED; /* to avoid spurious warnings */
+ *result = 0x5EED; /* to avoid spurious warnings */
return true;
}
*result = (int64) res;
if ((a < 0 && b > 0 && a < PG_INT64_MIN + b) ||
(a > 0 && b < 0 && a > PG_INT64_MAX + b))
{
- *result = 0x5EED; /* to avoid spurious warnings */
+ *result = 0x5EED; /* to avoid spurious warnings */
return true;
}
*result = a - b;
if (res > PG_INT64_MAX || res < PG_INT64_MIN)
{
- *result = 0x5EED; /* to avoid spurious warnings */
+ *result = 0x5EED; /* to avoid spurious warnings */
return true;
}
*result = (int64) res;
(a < 0 && b > 0 && a < PG_INT64_MIN / b) ||
(a < 0 && b < 0 && a < PG_INT64_MAX / b)))
{
- *result = 0x5EED; /* to avoid spurious warnings */
+ *result = 0x5EED; /* to avoid spurious warnings */
return true;
}
*result = a * b;
/* Name of SCRAM mechanisms per IANA */
#define SCRAM_SHA_256_NAME "SCRAM-SHA-256"
-#define SCRAM_SHA_256_PLUS_NAME "SCRAM-SHA-256-PLUS" /* with channel binding */
+#define SCRAM_SHA_256_PLUS_NAME "SCRAM-SHA-256-PLUS" /* with channel binding */
/* Channel binding types */
#define SCRAM_CHANNEL_BINDING_TLS_UNIQUE "tls-unique"
extern bool pg_str_endswith(const char *str, const char *end);
extern int strtoint(const char *pg_restrict str, char **pg_restrict endptr,
- int base);
+ int base);
#endif /* COMMON_STRING_H */
* expression evaluation, reducing code duplication.
*/
extern void ExecEvalFuncExprFusage(ExprState *state, ExprEvalStep *op,
- ExprContext *econtext);
+ ExprContext *econtext);
extern void ExecEvalFuncExprStrictFusage(ExprState *state, ExprEvalStep *op,
- ExprContext *econtext);
+ ExprContext *econtext);
extern void ExecEvalParamExec(ExprState *state, ExprEvalStep *op,
ExprContext *econtext);
extern void ExecEvalParamExecParams(Bitmapset *params, EState *estate);
TupleTableSlot *slot,
EState *estate);
extern ResultRelInfo *ExecInitPartitionInfo(ModifyTableState *mtstate,
- ResultRelInfo *resultRelInfo,
- PartitionTupleRouting *proute,
- EState *estate, int partidx);
+ ResultRelInfo *resultRelInfo,
+ PartitionTupleRouting *proute,
+ EState *estate, int partidx);
extern void ExecInitRoutingInfo(ModifyTableState *mtstate,
EState *estate,
PartitionTupleRouting *proute,
extern void ExecCleanupTupleRouting(ModifyTableState *mtstate,
PartitionTupleRouting *proute);
extern PartitionPruneState *ExecSetupPartitionPruneState(PlanState *planstate,
- List *partitionpruneinfo);
+ List *partitionpruneinfo);
extern Bitmapset *ExecFindMatchingSubPlans(PartitionPruneState *prunestate);
extern Bitmapset *ExecFindInitialMatchingSubPlans(PartitionPruneState *prunestate,
int nsubnodes);
extern void ExecInitResultTupleSlotTL(EState *estate, PlanState *planstate);
extern void ExecInitScanTupleSlot(EState *estate, ScanState *scanstate, TupleDesc tupleDesc);
extern TupleTableSlot *ExecInitExtraTupleSlot(EState *estate,
- TupleDesc tupleDesc);
+ TupleDesc tupleDesc);
extern TupleTableSlot *ExecInitNullTupleSlot(EState *estate,
TupleDesc tupType);
extern TupleDesc ExecTypeFromTL(List *targetList, bool hasoid);
double ntuples; /* Total tuples produced */
double ntuples2; /* Secondary node-specific tuple counter */
double nloops; /* # of run cycles for this node */
- double nfiltered1; /* # tuples removed by scanqual or joinqual */
- double nfiltered2; /* # tuples removed by "other" quals */
+ double nfiltered1; /* # tuples removed by scanqual or joinqual */
+ double nfiltered2; /* # tuples removed by "other" quals */
BufferUsage bufusage; /* Total buffer usage */
} Instrumentation;
QueryEnvironment *queryEnv; /* query environment setup for SPI level */
/* transaction management support */
- bool atomic; /* atomic execution context, does not allow transactions */
- bool internal_xact; /* SPI-managed transaction boundary, skip cleanup */
+ bool atomic; /* atomic execution context, does not allow
+ * transactions */
+ bool internal_xact; /* SPI-managed transaction boundary, skip
+ * cleanup */
} _SPI_connection;
/*
HeapTupleData tts_minhdr; /* workspace for minimal-tuple-only case */
#define FIELDNO_TUPLETABLESLOT_OFF 14
uint32 tts_off; /* saved state for slot_deform_tuple */
- bool tts_fixedTupleDescriptor; /* descriptor can't be changed */
+ bool tts_fixedTupleDescriptor; /* descriptor can't be changed */
} TupleTableSlot;
#define TTS_HAS_PHYSICAL_TUPLE(slot) \
extern void conditional_stack_destroy(ConditionalStack cstack);
-extern int conditional_stack_depth(ConditionalStack cstack);
+extern int conditional_stack_depth(ConditionalStack cstack);
extern void conditional_stack_push(ConditionalStack cstack, ifState new_state);
* prototypes for functions in be-secure-common.c
*/
extern int run_ssl_passphrase_command(const char *prompt, bool is_server_start,
- char *buf, int size);
+ char *buf, int size);
extern bool check_ssl_key_file_permissions(const char *ssl_key_file,
- bool isServerStart);
+ bool isServerStart);
#endif /* LIBPQ_H */
int es_num_root_result_relations; /* length of the array */
/*
- * The following list contains ResultRelInfos created by the tuple
- * routing code for partitions that don't already have one.
+ * The following list contains ResultRelInfos created by the tuple routing
+ * code for partitions that don't already have one.
*/
List *es_tuple_routing_result_relations;
MemoryContext hashtempcxt; /* temp memory context for hash tables */
ExprContext *innerecontext; /* econtext for computing inner tuples */
AttrNumber *keyColIdx; /* control data for hash tables */
- Oid *tab_eq_funcoids;/* equality func oids for table datatype(s) */
+ Oid *tab_eq_funcoids; /* equality func oids for table
+ * datatype(s) */
FmgrInfo *tab_hash_funcs; /* hash functions for table datatype(s) */
FmgrInfo *tab_eq_funcs; /* equality functions for table datatype(s) */
FmgrInfo *lhs_hash_funcs; /* hash functions for lefthand datatype(s) */
PlanState **appendplans; /* array of PlanStates for my inputs */
int as_nplans;
int as_whichplan;
- int as_first_partial_plan; /* Index of 'appendplans' containing
- * the first partial plan */
+ int as_first_partial_plan; /* Index of 'appendplans' containing
+ * the first partial plan */
ParallelAppendState *as_pstate; /* parallel coordination info */
Size pstate_len; /* size of parallel coordination info */
struct PartitionPruneState *as_prune_state;
WindowStatePerFunc perfunc; /* per-window-function information */
WindowStatePerAgg peragg; /* per-plain-aggregate information */
- ExprState *partEqfunction; /* equality funcs for partition columns */
- ExprState *ordEqfunction; /* equality funcs for ordering columns */
+ ExprState *partEqfunction; /* equality funcs for partition columns */
+ ExprState *ordEqfunction; /* equality funcs for ordering columns */
Tuplestorestate *buffer; /* stores rows of current partition */
int current_ptr; /* read pointer # for current row */
int framehead_ptr; /* read pointer # for frame head, if used */
typedef struct UniqueState
{
PlanState ps; /* its first field is NodeTag */
- ExprState *eqfunction; /* tuple equality qual */
+ ExprState *eqfunction; /* tuple equality qual */
} UniqueState;
/* ----------------
Node *raw_default; /* default value (untransformed parse tree) */
Node *cooked_default; /* default value (transformed expr tree) */
char identity; /* attidentity setting */
- RangeVar *identitySequence; /* to store identity sequence name for ALTER
- * TABLE ... ADD COLUMN */
+ RangeVar *identitySequence; /* to store identity sequence name for
+ * ALTER TABLE ... ADD COLUMN */
CollateClause *collClause; /* untransformed COLLATE spec, if any */
Oid collOid; /* collation OID (InvalidOid if not set) */
List *constraints; /* other constraints on column */
NodeTag type;
TransactionStmtKind kind; /* see above */
List *options; /* for BEGIN/START commands */
- char *savepoint_name; /* for savepoint commands */
+ char *savepoint_name; /* for savepoint commands */
char *gid; /* for two-phase-commit related commands */
} TransactionStmt;
Index qual_security_level; /* minimum security_level for quals */
/* Note: qual_security_level is zero if there are no securityQuals */
- InheritanceKind inhTargetKind; /* indicates if the target relation is an
- * inheritance child or partition or a
- * partitioned table */
+ InheritanceKind inhTargetKind; /* indicates if the target relation is an
+ * inheritance child or partition or a
+ * partitioned table */
bool hasJoinRTEs; /* true if any RTEs are RTE_JOIN kind */
bool hasLateralRTEs; /* true if any RTEs are marked LATERAL */
bool hasDeletedRTEs; /* true if any RTE was deleted from jointree */
extern PGDLLIMPORT double parallel_setup_cost;
extern PGDLLIMPORT int effective_cache_size;
extern PGDLLIMPORT Cost disable_cost;
-extern PGDLLIMPORT int max_parallel_workers_per_gather;
+extern PGDLLIMPORT int max_parallel_workers_per_gather;
extern PGDLLIMPORT bool enable_seqscan;
extern PGDLLIMPORT bool enable_indexscan;
extern PGDLLIMPORT bool enable_indexonlyscan;
extern PGDLLIMPORT bool enable_parallel_append;
extern PGDLLIMPORT bool enable_parallel_hash;
extern PGDLLIMPORT bool enable_partition_pruning;
-extern PGDLLIMPORT int constraint_exclusion;
+extern PGDLLIMPORT int constraint_exclusion;
extern double clamp_row_est(double nrows);
extern double index_pages_fetched(double tuples_fetched, BlockNumber pages,
* allpaths.c
*/
extern PGDLLIMPORT bool enable_geqo;
-extern PGDLLIMPORT int geqo_threshold;
-extern PGDLLIMPORT int min_parallel_table_scan_size;
-extern PGDLLIMPORT int min_parallel_index_scan_size;
+extern PGDLLIMPORT int geqo_threshold;
+extern PGDLLIMPORT int min_parallel_table_scan_size;
+extern PGDLLIMPORT int min_parallel_index_scan_size;
/* Hook for plugins to get control in set_rel_pathlist() */
typedef void (*set_rel_pathlist_hook_type) (PlannerInfo *root,
extern void create_partial_bitmap_paths(PlannerInfo *root, RelOptInfo *rel,
Path *bitmapqual);
extern void generate_partitionwise_join_paths(PlannerInfo *root,
- RelOptInfo *rel);
+ RelOptInfo *rel);
#ifdef OPTIMIZER_DEBUG
extern void debug_print_rel(PlannerInfo *root, RelOptInfo *rel);
extern Node *ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
- Node *last_srf, FuncCall *fn, bool proc_call,
- int location);
+ Node *last_srf, FuncCall *fn, bool proc_call,
+ int location);
extern FuncDetailCode func_get_detail(List *funcname,
List *fargs, List *fargnames,
struct SnapBuild *snapshot_builder;
/*
- * Marks the logical decoding context as fast forward decoding one.
- * Such a context does not have plugin loaded so most of the the following
+ * Marks the logical decoding context as fast forward decoding one. Such a
+ * context does not have plugin loaded so most of the following
* properties are unused.
*/
- bool fast_forward;
+ bool fast_forward;
OutputPluginCallbacks callbacks;
OutputPluginOptions options;
extern LogicalRepRelId logicalrep_read_delete(StringInfo in,
LogicalRepTupleData *oldtup);
extern void logicalrep_write_truncate(StringInfo out, int nrelids, Oid relids[],
- bool cascade, bool restart_seqs);
+ bool cascade, bool restart_seqs);
extern List *logicalrep_read_truncate(StringInfo in,
- bool *cascade, bool *restart_seqs);
+ bool *cascade, bool *restart_seqs);
extern void logicalrep_write_rel(StringInfo out, Relation rel);
extern LogicalRepRelation *logicalrep_read_rel(StringInfo in);
extern void logicalrep_write_typ(StringInfo out, Oid typoid);
} tp;
/*
- * Truncate data for REORDER_BUFFER_CHANGE_TRUNCATE representing
- * one set of relations to be truncated.
+ * Truncate data for REORDER_BUFFER_CHANGE_TRUNCATE representing one
+ * set of relations to be truncated.
*/
struct
{
bool cascade;
bool restart_seqs;
Oid *relids;
- } truncate;
+ } truncate;
/* Message with arbitrary data. */
struct
char conninfo[MAXCONNINFO];
/*
- * Host name (this can be a host name, an IP address, or a directory
- * path) and port number of the active replication connection.
+ * Host name (this can be a host name, an IP address, or a directory path)
+ * and port number of the active replication connection.
*/
char sender_host[NI_MAXHOST];
int sender_port;
typedef void (*walrcv_check_conninfo_fn) (const char *conninfo);
typedef char *(*walrcv_get_conninfo_fn) (WalReceiverConn *conn);
typedef void (*walrcv_get_senderinfo_fn) (WalReceiverConn *conn,
- char **sender_host,
- int *sender_port);
+ char **sender_host,
+ int *sender_port);
typedef char *(*walrcv_identify_system_fn) (WalReceiverConn *conn,
TimeLineID *primary_tli,
int *server_version);
extern void ResetUnloggedRelations(int op);
extern bool parse_filename_for_nontemp_relation(const char *name,
- int *oidchars, ForkNumber *fork);
+ int *oidchars, ForkNumber *fork);
#define UNLOGGED_RELATION_CLEANUP 0x0001
#define UNLOGGED_RELATION_INIT 0x0002
{
PROCESS_UTILITY_TOPLEVEL, /* toplevel interactive command */
PROCESS_UTILITY_QUERY, /* a complete query, but not toplevel */
- PROCESS_UTILITY_QUERY_NONATOMIC, /* a complete query, nonatomic execution context */
+ PROCESS_UTILITY_QUERY_NONATOMIC, /* a complete query, nonatomic
+ * execution context */
PROCESS_UTILITY_SUBCOMMAND /* a portion of a query */
} ProcessUtilityContext;
#define P_TSQ_WEB (1 << 1)
extern TSQuery parse_tsquery(char *buf,
- PushFunction pushval,
- Datum opaque,
- int flags);
+ PushFunction pushval,
+ Datum opaque,
+ int flags);
/* Functions for use by PushFunction implementations */
extern void pushValue(TSQueryParserState state,
* Flag types for iterate_json(b)_values to specify what elements from a
* json(b) document we want to iterate.
*/
-typedef enum JsonToIndex {
- jtiKey = 0x01,
- jtiString = 0x02,
- jtiNumeric = 0x04,
- jtiBool = 0x08,
- jtiAll = jtiKey | jtiString | jtiNumeric | jtiBool
+typedef enum JsonToIndex
+{
+ jtiKey = 0x01,
+ jtiString = 0x02,
+ jtiNumeric = 0x04,
+ jtiBool = 0x08,
+ jtiAll = jtiKey | jtiString | jtiNumeric | jtiBool
} JsonToIndex;
/* an action that will be applied to each value in iterate_json(b)_vaues functions */
extern uint32 parse_jsonb_index_flags(Jsonb *jb);
extern void iterate_jsonb_values(Jsonb *jb, uint32 flags, void *state,
- JsonIterateStringValuesAction action);
+ JsonIterateStringValuesAction action);
extern void iterate_json_values(text *json, uint32 flags, void *action_state,
- JsonIterateStringValuesAction action);
+ JsonIterateStringValuesAction action);
extern Jsonb *transform_jsonb_string_values(Jsonb *jsonb, void *action_state,
JsonTransformStringValuesAction transform_action);
extern text *transform_json_string_values(text *json, void *action_state,
bool *parttypbyval;
char *parttypalign;
Oid *parttypcoll;
-} PartitionKeyData;
+} PartitionKeyData;
extern void RelationBuildPartitionKey(Relation relation);
extern void RelationBuildPartitionDesc(Relation rel);
/* Bookkeeping data */
const char *name; /* portal's name */
const char *prepStmtName; /* source prepared statement (NULL if none) */
- MemoryContext portalContext;/* subsidiary memory for portal */
+ MemoryContext portalContext; /* subsidiary memory for portal */
ResourceOwner resowner; /* resources owned by portal */
void (*cleanup) (Portal portal); /* cleanup hook */
Bitmapset *rd_keyattr; /* cols that can be ref'd by foreign keys */
Bitmapset *rd_pkattr; /* cols included in primary key */
Bitmapset *rd_idattr; /* included in replica identity index */
- Bitmapset *rd_projidx; /* Oids of projection indexes */
+ Bitmapset *rd_projidx; /* Oids of projection indexes */
PublicationActions *rd_pubactions; /* publication actions */
typedef struct GenericIndexOpts
{
int32 vl_len_;
- bool recheck_on_update;
+ bool recheck_on_update;
} GenericIndexOpts;
/*
/* support for JITContext management */
extern void ResourceOwnerEnlargeJIT(ResourceOwner owner);
extern void ResourceOwnerRememberJIT(ResourceOwner owner,
- Datum handle);
+ Datum handle);
extern void ResourceOwnerForgetJIT(ResourceOwner owner,
- Datum handle);
+ Datum handle);
#endif /* RESOWNER_PRIVATE_H */
/* Private opaque state (points to shared memory) */
Sharedsort *sharedsort;
-} SortCoordinateData;
+} SortCoordinateData;
typedef struct SortCoordinateData *SortCoordinate;
if (varcharsize == 0 || varcharsize > size)
{
- /* compatibility mode, blank pad and null terminate char array */
+ /*
+ * compatibility mode, blank pad and null
+ * terminate char array
+ */
if (ORACLE_MODE(compat) && (type == ECPGt_char || type == ECPGt_unsigned_char))
{
memset(str, ' ', varcharsize);
memcpy(str, pval, size);
- str[varcharsize-1] = '\0';
+ str[varcharsize - 1] = '\0';
- /* compatibility mode empty string gets -1 indicator but no warning */
- if (size == 0) {
+ /*
+ * compatibility mode empty string gets -1
+ * indicator but no warning
+ */
+ if (size == 0)
+ {
/* truncation */
switch (ind_type)
{
case ECPGt_unsigned_long:
*((long *) (ind + ind_offset * act_tuple)) = -1;
break;
- #ifdef HAVE_LONG_LONG_INT
+#ifdef HAVE_LONG_LONG_INT
case ECPGt_long_long:
case ECPGt_unsigned_long_long:
*((long long int *) (ind + ind_offset * act_tuple)) = -1;
break;
- #endif /* HAVE_LONG_LONG_INT */
+#endif /* HAVE_LONG_LONG_INT */
default:
break;
}
if (ORACLE_MODE(compat) && (varcharsize - 1) < size)
{
if (type == ECPGt_char || type == ECPGt_unsigned_char)
- str[varcharsize-1] = '\0';
+ str[varcharsize - 1] = '\0';
}
if (varcharsize < size || (ORACLE_MODE(compat) && (varcharsize - 1) < size))
}
}
- if (output_filename && out_option == 0) {
+ if (output_filename && out_option == 0)
+ {
free(output_filename);
output_filename = NULL;
}
if (ind_p != NULL && ind_p != &struct_no_indicator)
{
ind_p = ind_p->next;
- if (ind_p == NULL && p->next != NULL) {
+ if (ind_p == NULL && p->next != NULL)
+ {
mmerror(PARSE_ERROR, ET_WARNING, "indicator struct \"%s\" has too few members", ind_name);
ind_p = &struct_no_indicator;
}
}
}
- if (ind_type != NULL && ind_p != NULL && ind_p != &struct_no_indicator) {
+ if (ind_type != NULL && ind_p != NULL && ind_p != &struct_no_indicator)
+ {
mmerror(PARSE_ERROR, ET_WARNING, "indicator struct \"%s\" has too many members", ind_name);
}
{"scram_channel_binding", NULL, DefaultSCRAMChannelBinding, NULL,
"SCRAM-Channel-Binding", "D",
- 21, /* sizeof("tls-server-end-point") == 21 */
+ 21, /* sizeof("tls-server-end-point") == 21 */
offsetof(struct pg_conn, scram_channel_binding)},
/*
static int verify_cb(int ok, X509_STORE_CTX *ctx);
static int openssl_verify_peer_name_matches_certificate_name(PGconn *conn,
- ASN1_STRING *name,
- char **store_name);
+ ASN1_STRING *name,
+ char **store_name);
static void destroy_ssl_system(void);
static int initialize_SSL(PGconn *conn);
static PostgresPollingStatusType open_client_SSL(PGconn *);
(*names_examined)++;
rc = openssl_verify_peer_name_matches_certificate_name(conn,
- name->d.dNSName,
- &alt_name);
+ name->d.dNSName,
+ &alt_name);
if (alt_name)
{
{
(*names_examined)++;
rc = openssl_verify_peer_name_matches_certificate_name(
- conn,
- X509_NAME_ENTRY_get_data(
- X509_NAME_get_entry(subject_name, cn_index)),
- first_name);
+ conn,
+ X509_NAME_ENTRY_get_data(
+ X509_NAME_get_entry(subject_name, cn_index)),
+ first_name);
}
}
}
#ifdef SSL_OP_NO_COMPRESSION
if (conn->sslcompression && conn->sslcompression[0] == '0')
SSL_set_options(conn->ssl, SSL_OP_NO_COMPRESSION);
+
/*
* Mainline OpenSSL introduced SSL_clear_options() before
* SSL_OP_NO_COMPRESSION, so this following #ifdef should not be
* retransmits */
char *keepalives_count; /* maximum number of TCP keepalive
* retransmits */
- char *scram_channel_binding; /* SCRAM channel binding type */
+ char *scram_channel_binding; /* SCRAM channel binding type */
char *sslmode; /* SSL mode (require,prefer,allow,disable) */
char *sslcompression; /* SSL compression (0 or 1) */
char *sslkey; /* client key filename */
*
*/
extern int pgtls_verify_peer_name_matches_certificate_guts(PGconn *conn,
- int *names_examined,
- char **first_name);
+ int *names_examined,
+ char **first_name);
/* === miscellaneous macros === */
{
if (rettypeid == VOIDOID ||
rettypeid == RECORDOID)
- /* okay */ ;
+ /* okay */ ;
else if (rettypeid == TRIGGEROID || rettypeid == EVTTRIGGEROID)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
function->fn_rettyplen = typeStruct->typlen;
/*
- * install $0 reference, but only for polymorphic return
- * types, and not when the return is specified through an
- * output parameter.
+ * install $0 reference, but only for polymorphic return types,
+ * and not when the return is specified through an output
+ * parameter.
*/
if (IsPolymorphicType(procStruct->prorettype) &&
num_out_args == 0)
static int exec_stmt_perform(PLpgSQL_execstate *estate,
PLpgSQL_stmt_perform *stmt);
static int exec_stmt_call(PLpgSQL_execstate *estate,
- PLpgSQL_stmt_call *stmt);
+ PLpgSQL_stmt_call *stmt);
static int exec_stmt_getdiag(PLpgSQL_execstate *estate,
PLpgSQL_stmt_getdiag *stmt);
static int exec_stmt_if(PLpgSQL_execstate *estate,
static int exec_stmt_rollback(PLpgSQL_execstate *estate,
PLpgSQL_stmt_rollback *stmt);
static int exec_stmt_set(PLpgSQL_execstate *estate,
- PLpgSQL_stmt_set *stmt);
+ PLpgSQL_stmt_set *stmt);
static void plpgsql_estate_setup(PLpgSQL_execstate *estate,
PLpgSQL_function *func,
static void exec_eval_cleanup(PLpgSQL_execstate *estate);
static void exec_prepare_plan(PLpgSQL_execstate *estate,
- PLpgSQL_expr *expr, int cursorOptions,
- bool keepplan);
+ PLpgSQL_expr *expr, int cursorOptions,
+ bool keepplan);
static void exec_simple_check_plan(PLpgSQL_execstate *estate, PLpgSQL_expr *expr);
static void exec_save_simple_expr(PLpgSQL_expr *expr, CachedPlan *cplan);
static void exec_check_rw_parameter(PLpgSQL_expr *expr, int target_dno);
nfields = 0;
i = 0;
- foreach (lc, funcexpr->args)
+ foreach(lc, funcexpr->args)
{
- Node *n = lfirst(lc);
+ Node *n = lfirst(lc);
if (argmodes && argmodes[i] == PROARGMODE_INOUT)
{
error_context_stack = &plerrcontext;
/*
- * For a procedure or function declared to return void, the Python return value
- * must be None. For void-returning functions, we also treat a None
- * return value as a special "void datum" rather than NULL (as is the
- * case for non-void-returning functions).
+ * For a procedure or function declared to return void, the Python
+ * return value must be None. For void-returning functions, we also
+ * treat a None return value as a special "void datum" rather than
+ * NULL (as is the case for non-void-returning functions).
*/
if (proc->result.typoid == VOIDOID)
{
static int pltcl_subtransaction(ClientData cdata, Tcl_Interp *interp,
int objc, Tcl_Obj *const objv[]);
static int pltcl_commit(ClientData cdata, Tcl_Interp *interp,
- int objc, Tcl_Obj *const objv[]);
+ int objc, Tcl_Obj *const objv[]);
static int pltcl_rollback(ClientData cdata, Tcl_Interp *interp,
- int objc, Tcl_Obj *const objv[]);
+ int objc, Tcl_Obj *const objv[]);
static void pltcl_subtrans_begin(MemoryContext oldcontext,
ResourceOwner oldowner);
nfalsepos = nfalsepos_for_missing_strings(filter, nelements);
ereport((nfalsepos > nelements * FPOSITIVE_THRESHOLD) ? WARNING : DEBUG1,
- (errmsg_internal("seed: " UINT64_FORMAT " false positives: " INT64_FORMAT " (%.6f%%) bitset %.2f%% set" ,
+ (errmsg_internal("seed: " UINT64_FORMAT " false positives: " INT64_FORMAT " (%.6f%%) bitset %.2f%% set",
seed, nfalsepos, (double) nfalsepos / nelements,
100.0 * bloom_prop_bits_set(filter))));