/*
* If the foreign table is a partition, we need to create a new RTE
* describing the foreign table for use by deparseInsertSql and
- * create_foreign_modify() below, after first copying the parent's
- * RTE and modifying some fields to describe the foreign partition to
- * work on. However, if this is invoked by UPDATE, the existing RTE
- * may already correspond to this partition if it is one of the
- * UPDATE subplan target rels; in that case, we can just use the
- * existing RTE as-is.
+ * create_foreign_modify() below, after first copying the parent's RTE and
+ * modifying some fields to describe the foreign partition to work on.
+ * However, if this is invoked by UPDATE, the existing RTE may already
+ * correspond to this partition if it is one of the UPDATE subplan target
+ * rels; in that case, we can just use the existing RTE as-is.
*/
rte = list_nth(estate->es_range_table, resultRelation - 1);
if (rte->relid != RelationGetRelid(rel))
rte->relkind = RELKIND_FOREIGN_TABLE;
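/*
 * Illustrative sketch only, not code from this change: the "copy the
 * parent's RTE and modify some fields" step described above boils down to
 * roughly the following (the remaining fields stay as copied).
 */
rte = copyObject(rte);
rte->relid = RelationGetRelid(rel);
rte->relkind = RELKIND_FOREIGN_TABLE;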
/*
- * For UPDATE, we must use the RT index of the first subplan
- * target rel's RTE, because the core code would have built
- * expressions for the partition, such as RETURNING, using that
- * RT index as varno of Vars contained in those expressions.
+ * For UPDATE, we must use the RT index of the first subplan target
+ * rel's RTE, because the core code would have built expressions for
+ * the partition, such as RETURNING, using that RT index as varno of
+ * Vars contained in those expressions.
*/
if (plan && plan->operation == CMD_UPDATE &&
resultRelation == plan->nominalRelation)
metapage = BufferGetPage(metabuffer);
/*
- * An insertion to the pending list could logically belong anywhere in
- * the tree, so it conflicts with all serializable scans. All scans
- * acquire a predicate lock on the metabuffer to represent that.
+ * An insertion to the pending list could logically belong anywhere in the
+ * tree, so it conflicts with all serializable scans. All scans acquire a
+ * predicate lock on the metabuffer to represent that.
*/
CheckForSerializableConflictIn(index, NULL, metabuffer);
LockBuffer(stack->buffer, GIN_UNLOCK);
/*
- * Acquire predicate lock on the posting tree. We already hold
- * a lock on the entry page, but insertions to the posting tree
+ * Acquire predicate lock on the posting tree. We already hold a
+ * lock on the entry page, but insertions to the posting tree
* don't check for conflicts on that level.
*/
PredicateLockPage(btree->index, rootPostingTree, snapshot);
*ntids = 0;
/*
- * Acquire predicate lock on the metapage, to conflict with any
- * fastupdate insertions.
+ * Acquire predicate lock on the metapage, to conflict with any fastupdate
+ * insertions.
*/
PredicateLockPage(scan->indexRelation, GIN_METAPAGE_BLKNO, scan->xs_snapshot);
/*
* If table receives enough insertions and no cleanup was performed,
- * then index would appear have stale statistics. If scale factor
- * is set, we avoid that by performing cleanup if the number of
- * inserted tuples exceeds vacuum_cleanup_index_scale_factor fraction
- * of original tuples count.
+ * then the index would appear to have stale statistics.  If the scale
+ * factor is set, we avoid that by performing cleanup if the number of
+ * inserted tuples exceeds the vacuum_cleanup_index_scale_factor fraction
+ * of the original tuple count.
*/
relopts = (StdRdOptions *) info->index->rd_options;
cleanup_scale_factor = (relopts &&
&oldestBtpoXact);
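/*
 * Rough sketch of the threshold test described above, assuming
 * "prev_num_heap_tuples" holds the tuple count recorded at the last
 * cleanup and "result" is the flag that triggers cleanup; the real code
 * reads the recorded count from the metapage.
 */
if (prev_num_heap_tuples < 0 ||
	info->num_heap_tuples > (1.0 + cleanup_scale_factor) * prev_num_heap_tuples)
	result = true;		/* no recorded count yet, or enough new insertions */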
/*
- * Update cleanup-related information in metapage. This information
- * is used only for cleanup but keeping them up to date can avoid
+ * Update cleanup-related information in the metapage.  This information
+ * is used only for cleanup, but keeping it up to date can avoid
* unnecessary cleanup even after bulkdelete.
*/
_bt_update_meta_cleanup_info(info->index, oldestBtpoXact,
* non-zero, or when there is no explicit representation and the
* tuple is evidently not a pre-pg_upgrade tuple.
*
- * Prior to v11, downlinks always had P_HIKEY as their offset.
- * Use that to decide if the tuple is a pre-v11 tuple.
+ * Prior to v11, downlinks always had P_HIKEY as their offset. Use
+ * that to decide if the tuple is a pre-v11 tuple.
*/
return BTreeTupleGetNAtts(itup, rel) == 0 ||
((itup->t_info & INDEX_ALT_TID_MASK) == 0 &&
errmsg("could not read from control file: %m")));
else
ereport(PANIC,
- (errmsg("could not read from control file: read %d bytes, expected %d", r, (int) sizeof(ControlFileData))));
+ (errmsg("could not read from control file: read %d bytes, expected %d", r, (int) sizeof(ControlFileData))));
}
pgstat_report_wait_end();
}
/*
- * Check that the address on the page agrees with what we expected.
- * This check typically fails when an old WAL segment is recycled,
- * and hasn't yet been overwritten with new data yet.
+ * Check that the address on the page agrees with what we expected. This
+ * check typically fails when an old WAL segment is recycled and hasn't
+ * yet been overwritten with new data.
*/
if (hdr->xlp_pageaddr != recaddr)
{
*/
foreach(lc, childStmt->indexParams)
{
- IndexElem *ielem = lfirst(lc);
+ IndexElem *ielem = lfirst(lc);
/*
* If the index parameter is an expression, we must
EState *estate, bool emitError)
{
ExprContext *econtext;
- bool success;
+ bool success;
/*
* If first time through, build expression state tree for the partition
else
{
/*
- * We already saw this transaction, but initially added it to the list
- * of top-level txns. Now that we know it's not top-level, remove
- * it from there.
+ * We already saw this transaction, but initially added it to the
+ * list of top-level txns. Now that we know it's not top-level,
+ * remove it from there.
*/
dlist_delete(&subtxn->node);
}
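/*
 * Sketch of what typically follows (illustrative; field names are
 * assumptions): mark the transaction as a known subtransaction and link
 * it into its top-level transaction's list of subtransactions instead.
 */
subtxn->is_known_as_subxact = true;
dlist_push_tail(&txn->subtxns, &subtxn->node);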
/*
* If we wished to exclude xids this would be the right place for it.
* Procs with the PROC_IN_VACUUM flag set don't usually assign xids,
- * but they do during truncation at the end when they get the lock
- * and truncate, so it is not much of a problem to include them if they
+ * but they do during truncation at the end when they get the lock and
+ * truncate, so it is not much of a problem to include them if they
* are seen and it is cleaner to include them.
*/
*/
typedef struct RecoveryLockListsEntry
{
- TransactionId xid;
- List *locks;
+ TransactionId xid;
+ List *locks;
} RecoveryLockListsEntry;
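/*
 * Hypothetical usage sketch ("xid" and "newlock" are assumed local
 * variables): entries are looked up by xid, created on demand, and
 * accumulate the locks acquired on behalf of that transaction during
 * recovery.
 */
RecoveryLockListsEntry *entry;
bool		found;

entry = (RecoveryLockListsEntry *)
	hash_search(RecoveryLockLists, &xid, HASH_ENTER, &found);
if (!found)
	entry->locks = NIL;
entry->locks = lappend(entry->locks, newlock);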
/*
InitRecoveryTransactionEnvironment(void)
{
VirtualTransactionId vxid;
- HASHCTL hash_ctl;
+ HASHCTL hash_ctl;
/*
* Initialize the hash table for tracking the list of locks held by each
{
xl_standby_lock *lock = (xl_standby_lock *) linitial(locks);
LOCKTAG locktag;
+
elog(trace_recovery(DEBUG4),
"releasing recovery lock: xid %u db %u rel %u",
lock->xid, lock->dbOid, lock->relOid);
void
StandbyReleaseAllLocks(void)
{
- HASH_SEQ_STATUS status;
+ HASH_SEQ_STATUS status;
RecoveryLockListsEntry *entry;
elog(trace_recovery(DEBUG2), "release all standby locks");
void
StandbyReleaseOldLocks(TransactionId oldxid)
{
- HASH_SEQ_STATUS status;
+ HASH_SEQ_STATUS status;
RecoveryLockListsEntry *entry;
hash_seq_init(&status, RecoveryLockLists);
* acquire locks early to avoid deadlocks.
*
* We also take the opportunity to verify that all
- * partitions are something we can put an index on,
- * to avoid building some indexes only to fail later.
+ * partitions are something we can put an index on, to
+ * avoid building some indexes only to fail later.
*/
if (stmt->relation->inh &&
get_rel_relkind(relid) == RELKIND_PARTITIONED_TABLE)
inheritors = find_all_inheritors(relid, lockmode, NULL);
foreach(lc, inheritors)
{
- char relkind = get_rel_relkind(lfirst_oid(lc));
+ char relkind = get_rel_relkind(lfirst_oid(lc));
if (relkind != RELKIND_RELATION &&
relkind != RELKIND_MATVIEW &&
{
static const struct
{
- enum jbvType type;
- const char *msg;
+ enum jbvType type;
+ const char *msg;
}
- messages[] =
+ messages[] =
{
- { jbvNull, gettext_noop("cannot cast jsonb null to type %s") },
- { jbvString, gettext_noop("cannot cast jsonb string to type %s") },
- { jbvNumeric, gettext_noop("cannot cast jsonb numeric to type %s") },
- { jbvBool, gettext_noop("cannot cast jsonb boolean to type %s") },
- { jbvArray, gettext_noop("cannot cast jsonb array to type %s") },
- { jbvObject, gettext_noop("cannot cast jsonb object to type %s") },
- { jbvBinary, gettext_noop("cannot cast jsonb array or object to type %s") }
+ {jbvNull, gettext_noop("cannot cast jsonb null to type %s")},
+ {jbvString, gettext_noop("cannot cast jsonb string to type %s")},
+ {jbvNumeric, gettext_noop("cannot cast jsonb numeric to type %s")},
+ {jbvBool, gettext_noop("cannot cast jsonb boolean to type %s")},
+ {jbvArray, gettext_noop("cannot cast jsonb array to type %s")},
+ {jbvObject, gettext_noop("cannot cast jsonb object to type %s")},
+ {jbvBinary, gettext_noop("cannot cast jsonb array or object to type %s")}
};
- int i;
+ int i;
- for(i=0; i<lengthof(messages); i++)
+ for (i = 0; i < lengthof(messages); i++)
if (messages[i].type == type)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg(messages[i].msg, sqltype)));
/* should be unreachable */
- elog(ERROR, "unknown jsonb type: %d", (int)type);
+ elog(ERROR, "unknown jsonb type: %d", (int) type);
}
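/*
 * Hypothetical caller sketch (names assumed, not shown in this change):
 * a jsonb cast function checks the extracted JsonbValue "v" and reports
 * the matching message from the table above on a mismatch.
 */
if (v.type != jbvNumeric)
	cannotCastJsonbValue(v.type, "numeric");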
Datum
char **attoptions; /* per-attribute options */
Oid *attcollation; /* per-attribute collation selection */
char **attfdwoptions; /* per-attribute fdw options */
- char **attmissingval; /* per attribute missing value */
+ char **attmissingval; /* per attribute missing value */
bool *notnull; /* NOT NULL constraints on attributes */
bool *inhNotNull; /* true if NOT NULL is inherited */
struct _attrDefInfo **attrdefs; /* DEFAULT expressions */
/* ALTER INDEX <foo> SET|RESET ( */
else if (Matches5("ALTER", "INDEX", MatchAny, "RESET", "("))
COMPLETE_WITH_LIST8("fillfactor", "recheck_on_update",
- "vacuum_cleanup_index_scale_factor", /* BTREE */
+ "vacuum_cleanup_index_scale_factor", /* BTREE */
"fastupdate", "gin_pending_list_limit", /* GIN */
"buffering", /* GiST */
"pages_per_range", "autosummarize" /* BRIN */
);
else if (Matches5("ALTER", "INDEX", MatchAny, "SET", "("))
COMPLETE_WITH_LIST8("fillfactor =", "recheck_on_update =",
- "vacuum_cleanup_index_scale_factor =", /* BTREE */
+ "vacuum_cleanup_index_scale_factor =", /* BTREE */
"fastupdate =", "gin_pending_list_limit =", /* GIN */
"buffering =", /* GiST */
"pages_per_range =", "autosummarize =" /* BRIN */
/* Validate a page */
extern bool XLogReaderValidatePageHeader(XLogReaderState *state,
- XLogRecPtr recptr, char *phdr);
+ XLogRecPtr recptr, char *phdr);
/* Invalidate read state */
extern void XLogReaderInvalReadState(XLogReaderState *state);
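/*
 * Typical use (sketch; variable names assumed): a read_page callback can
 * validate a page before returning it, e.g.
 *		if (!XLogReaderValidatePageHeader(xlogreader, targetPagePtr, readBuf))
 *			return -1;
 * so that stale pages from recycled WAL segments are rejected early.
 */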
*/
Snapshot base_snapshot;
XLogRecPtr base_snapshot_lsn;
- dlist_node base_snapshot_node; /* link in txns_by_base_snapshot_lsn */
+ dlist_node base_snapshot_node; /* link in txns_by_base_snapshot_lsn */
/*
* How many ReorderBufferChange's do we have in this txn.