static GBT_VARKEY *
gbt_bit_l2n(GBT_VARKEY *leaf)
{
-
GBT_VARKEY *out = leaf;
GBT_VARKEY_R r = gbt_var_key_readable(leaf);
bytea *o;
Datum
gbt_bpchar_compress(PG_FUNCTION_ARGS)
{
-
GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
GISTENTRY *retval;
Datum
gbt_ts_penalty(PG_FUNCTION_ARGS)
{
-
tsKEY *origentry = (tsKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key);
tsKEY *newentry = (tsKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key);
float *result = (float *) PG_GETARG_POINTER(2);
bool
gbt_num_same(const GBT_NUMKEY *a, const GBT_NUMKEY *b, const gbtree_ninfo *tinfo)
{
-
GBT_NUMKEY_R b1,
b2;
void
gbt_num_bin_union(Datum *u, GBT_NUMKEY *e, const gbtree_ninfo *tinfo)
{
-
GBT_NUMKEY_R rd;
rd.lower = &e[0];
GBT_VARKEY_R
gbt_var_key_readable(const GBT_VARKEY *k)
{
-
GBT_VARKEY_R r;
r.lower = (bytea *) &(((char *) k)[VARHDRSZ]);
GISTENTRY *
gbt_var_compress(GISTENTRY *entry, const gbtree_vinfo *tinfo)
{
-
GISTENTRY *retval;
if (entry->leafkey)
gbt_var_union(const GistEntryVector *entryvec, int32 *size, Oid collation,
const gbtree_vinfo *tinfo)
{
-
int i = 0,
numranges = entryvec->n;
GBT_VARKEY *cur;
ereport(ERROR,
(errcode(ERRCODE_FDW_OUT_OF_MEMORY),
errmsg("out of memory"),
- errdetail("could not get libpq's default connection options")));
+ errdetail("could not get libpq's default connection options")));
}
/* Validate each supplied option. */
if (!is_valid_dblink_option(options, def->defname, context))
{
/*
- * Unknown option, or invalid option for the context specified,
- * so complain about it. Provide a hint with list of valid
- * options for the context.
+ * Unknown option, or invalid option for the context specified, so
+ * complain about it. Provide a hint with list of valid options
+ * for the context.
*/
StringInfoData buf;
const PQconninfoOption *opt;
char **filename, List **other_options);
static List *get_file_fdw_attribute_options(Oid relid);
static bool check_selective_binary_conversion(RelOptInfo *baserel,
- Oid foreigntableid,
- List **columns);
+ Oid foreigntableid,
+ List **columns);
static void estimate_size(PlannerInfo *root, RelOptInfo *baserel,
FileFdwPlanState *fdw_private);
static void estimate_costs(PlannerInfo *root, RelOptInfo *baserel,
&startup_cost, &total_cost);
/*
- * Create a ForeignPath node and add it as only possible path. We use the
+ * Create a ForeignPath node and add it as only possible path. We use the
* fdw_private list of the path to carry the convert_selectively option;
* it will be propagated into the fdw_private list of the Plan node.
*/
/* Add all the attributes used by restriction clauses. */
foreach(lc, baserel->baserestrictinfo)
{
- RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
pull_varattnos((Node *) rinfo->clause, baserel->relid,
&attrs_used);
* digit as numeric - could be a zip code or similar
*/
if (src->len > 0 &&
- !(src->data[0] == '0' && isdigit((unsigned char) src->data[1])) &&
+ !(src->data[0] == '0' && isdigit((unsigned char) src->data[1])) &&
strspn(src->data, "+-0123456789Ee.") == src->len)
{
/*
* value. Ignore any actual parsed value.
*/
char *endptr = "junk";
- long lval;
+ long lval;
- lval = strtol(src->data, &endptr, 10);
+ lval = strtol(src->data, &endptr, 10);
(void) lval;
if (*endptr == '\0')
{
else
{
/* not an int - try a double */
- double dval;
+ double dval;
dval = strtod(src->data, &endptr);
(void) dval;
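/*
 * Editor's sketch (not part of this patch; hypothetical helper name):
 * the endptr idiom used above.  strtol/strtod "accept" a string only
 * if they consume it entirely, i.e. endptr lands on the trailing NUL;
 * overflow (errno/ERANGE) handling is omitted for brevity.
 */
#include <stdlib.h>

enum num_kind
{
	NUM_NONE, NUM_INT, NUM_DOUBLE
};

static enum num_kind
classify_numeric(const char *s)
{
	char	   *endptr;

	(void) strtol(s, &endptr, 10);
	if (*endptr == '\0')
		return NUM_INT;			/* whole string parsed as an integer */

	(void) strtod(s, &endptr);
	if (*endptr == '\0')
		return NUM_DOUBLE;		/* whole string parsed as a double */

	return NUM_NONE;			/* trailing junk: not numeric */
}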
{
eary->alloc *= 2;
eary->array = (char **) pg_realloc(eary->array,
- eary->alloc * sizeof(char *));
+ eary->alloc * sizeof(char *));
}
eary->array[eary->num] = pg_strdup(eltname);
dryrun = true;
break;
case 'x':
- additional_ext = strdup(optarg); /* Extension to remove from
- * xlogfile names */
+ additional_ext = strdup(optarg); /* Extension to remove
+ * from xlogfile names */
break;
default:
fprintf(stderr, "Try \"%s --help\" for more information.\n", progname);
* There's no way to trigger failover via signal on Windows.
*/
(void) pqsignal(SIGUSR1, sighandler);
- (void) pqsignal(SIGINT, sighandler); /* deprecated, use SIGUSR1 */
+ (void) pqsignal(SIGINT, sighandler); /* deprecated, use SIGUSR1 */
(void) pqsignal(SIGQUIT, sigquit_handler);
#endif
static void output(uint64 loop_count);
/* record duration in powers of 2 microseconds */
-int64 histogram[32];
+int64 histogram[32];
int
main(int argc, char *argv[])
uint64 total_time;
int64 time_elapsed = 0;
uint64 loop_count = 0;
- uint64 prev, cur;
- instr_time start_time, end_time, temp;
+ uint64 prev,
+ cur;
+ instr_time start_time,
+ end_time,
+ temp;
total_time = duration > 0 ? duration * 1000000 : 0;
while (time_elapsed < total_time)
{
- int32 diff, bits = 0;
+ int32 diff,
+ bits = 0;
prev = cur;
INSTR_TIME_SET_CURRENT(temp);
static void
output(uint64 loop_count)
{
- int64 max_bit = 31, i;
+ int64 max_bit = 31,
+ i;
/* find highest bit value */
while (max_bit > 0 && histogram[max_bit] == 0)
max_bit--;
-
+
printf("Histogram of timing durations:\n");
printf("%6s %10s %10s\n", "< usec", "% of total", "count");
/* lame hack to work around INT64_FORMAT deficiencies */
snprintf(buf, sizeof(buf), INT64_FORMAT, histogram[i]);
printf("%6ld %9.5f %10s\n", 1l << i,
- (double) histogram[i] * 100 / loop_count, buf);
+ (double) histogram[i] * 100 / loop_count, buf);
}
}
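/*
 * Editor's sketch (not part of this patch): how a duration in
 * microseconds maps to a power-of-2 histogram bucket in the loop
 * above.  Bucket i counts durations needing i significant bits, so
 * the report's "< usec" column for bucket i is 1 << i.
 */
static void
record_duration(int64 histogram[32], int32 diff)
{
	int32		bits = 0;

	while (diff)				/* count significant bits of the duration */
	{
		diff >>= 1;
		bits++;
	}
	histogram[bits]++;
}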
else
{
/*
- * Back up endword to the escape character when stopping at
- * an escaped char, so that subsequent get_wildcard_part will
+ * Back up endword to the escape character when stopping at an
+ * escaped char, so that subsequent get_wildcard_part will
* restart from the escape character. We assume here that
* escape chars are single-byte.
*/
* fix_path_separator
* For non-Windows, just return the argument.
* For Windows convert any forward slash to a backslash
- * such as is suitable for arguments to builtin commands
+ * such as is suitable for arguments to builtin commands
* like RMDIR and DEL.
*/
static char *
{
#ifdef WIN32
- char *result;
- char *c;
+ char *result;
+ char *c;
result = pg_strdup(path);
*c = '\\';
return result;
-
#else
return path;
-
#endif
}
check_is_super_user(&new_cluster);
/*
- * We don't restore our own user, so both clusters must match have
- * matching install-user oids.
+ * We don't restore our own user, so both clusters must have matching
+ * install-user oids.
*/
if (old_cluster.install_role_oid != new_cluster.install_role_oid)
pg_log(PG_FATAL,
- "Old and new cluster install users have different values for pg_authid.oid.\n");
+ "Old and new cluster install users have different values for pg_authid.oid.\n");
/*
- * We only allow the install user in the new cluster because other
- * defined users might match users defined in the old cluster and
- * generate an error during pg_dump restore.
+ * We only allow the install user in the new cluster because other defined
+ * users might match users defined in the old cluster and generate an
+ * error during pg_dump restore.
*/
if (new_cluster.role_count != 1)
pg_log(PG_FATAL, "Only the install user can be defined in the new cluster.\n");
-
+
check_for_prepared_transactions(&new_cluster);
}
if (deletion_script_file_name)
pg_log(PG_REPORT,
- "Running this script will delete the old cluster's data files:\n"
+ "Running this script will delete the old cluster's data files:\n"
" %s\n",
deletion_script_file_name);
else
pg_log(PG_REPORT,
"Could not create a script to delete the old cluster's data\n"
- "files because user-defined tablespaces exist in the old cluster\n"
- "directory. The old cluster's contents must be deleted manually.\n");
+ "files because user-defined tablespaces exist in the old cluster\n"
+ "directory. The old cluster's contents must be deleted manually.\n");
}
/* We read the real port number for PG >= 9.1 */
if (live_check && GET_MAJOR_VERSION(old_cluster.major_version) < 901 &&
old_cluster.port == DEF_PGUPORT)
- pg_log(PG_FATAL, "When checking a pre-PG 9.1 live old server, "
- "you must specify the old server's port number.\n");
+ pg_log(PG_FATAL, "When checking a pre-PG 9.1 live old server, "
+ "you must specify the old server's port number.\n");
if (live_check && old_cluster.port == new_cluster.port)
pg_log(PG_FATAL, "When checking a live server, "
if (GET_MAJOR_VERSION(cluster->major_version) < 902)
{
/*
- * Pre-9.2 did not canonicalize the supplied locale names
- * to match what the system returns, while 9.2+ does, so
- * convert pre-9.2 to match.
+ * Pre-9.2 did not canonicalize the supplied locale names to match
+ * what the system returns, while 9.2+ does, so convert pre-9.2 to
+ * match.
*/
ctrl->lc_collate = get_canonical_locale_name(LC_COLLATE,
- pg_strdup(PQgetvalue(res, 0, i_datcollate)));
+ pg_strdup(PQgetvalue(res, 0, i_datcollate)));
ctrl->lc_ctype = get_canonical_locale_name(LC_CTYPE,
- pg_strdup(PQgetvalue(res, 0, i_datctype)));
- }
+ pg_strdup(PQgetvalue(res, 0, i_datctype)));
+ }
else
{
- ctrl->lc_collate = pg_strdup(PQgetvalue(res, 0, i_datcollate));
+ ctrl->lc_collate = pg_strdup(PQgetvalue(res, 0, i_datcollate));
ctrl->lc_ctype = pg_strdup(PQgetvalue(res, 0, i_datctype));
}
ControlData *newctrl)
{
/*
- * These are often defined with inconsistent case, so use pg_strcasecmp().
- * They also often use inconsistent hyphenation, which we cannot fix, e.g.
- * UTF-8 vs. UTF8, so at least we display the mismatching values.
+ * These are often defined with inconsistent case, so use pg_strcasecmp().
+ * They also often use inconsistent hyphenation, which we cannot fix, e.g.
+ * UTF-8 vs. UTF8, so at least we display the mismatching values.
*/
if (pg_strcasecmp(oldctrl->lc_collate, newctrl->lc_collate) != 0)
pg_log(PG_FATAL,
- "lc_collate cluster values do not match: old \"%s\", new \"%s\"\n",
+ "lc_collate cluster values do not match: old \"%s\", new \"%s\"\n",
oldctrl->lc_collate, newctrl->lc_collate);
if (pg_strcasecmp(oldctrl->lc_ctype, newctrl->lc_ctype) != 0)
pg_log(PG_FATAL,
- "lc_ctype cluster values do not match: old \"%s\", new \"%s\"\n",
+ "lc_ctype cluster values do not match: old \"%s\", new \"%s\"\n",
oldctrl->lc_ctype, newctrl->lc_ctype);
if (pg_strcasecmp(oldctrl->encoding, newctrl->encoding) != 0)
pg_log(PG_FATAL,
- "encoding cluster values do not match: old \"%s\", new \"%s\"\n",
+ "encoding cluster values do not match: old \"%s\", new \"%s\"\n",
oldctrl->encoding, newctrl->encoding);
}
SCRIPT_EXT);
/*
- * Some users (oddly) create tablespaces inside the cluster data
- * directory. We can't create a proper old cluster delete script
- * in that case.
+ * Some users (oddly) create tablespaces inside the cluster data
+ * directory. We can't create a proper old cluster delete script in that
+ * case.
*/
strlcpy(old_cluster_pgdata, old_cluster.pgdata, MAXPGPATH);
canonicalize_path(old_cluster_pgdata);
for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
{
char old_tablespace_dir[MAXPGPATH];
-
+
strlcpy(old_tablespace_dir, os_info.old_tablespaces[tblnum], MAXPGPATH);
canonicalize_path(old_tablespace_dir);
if (path_is_prefix_of_path(old_cluster_pgdata, old_tablespace_dir))
/* remove PG_VERSION? */
if (GET_MAJOR_VERSION(old_cluster.major_version) <= 804)
fprintf(script, RM_CMD " %s%s%cPG_VERSION\n",
- fix_path_separator(os_info.old_tablespaces[tblnum]),
+ fix_path_separator(os_info.old_tablespaces[tblnum]),
fix_path_separator(old_cluster.tablespace_suffix),
PATH_SEPARATOR);
* or a version-specific subdirectory.
*/
fprintf(script, RMDIR_CMD " %s%s\n",
- fix_path_separator(os_info.old_tablespaces[tblnum]),
+ fix_path_separator(os_info.old_tablespaces[tblnum]),
fix_path_separator(old_cluster.tablespace_suffix));
}
save = setlocale(category, NULL);
if (!save)
- pg_log(PG_FATAL, "failed to get the current locale\n");
+ pg_log(PG_FATAL, "failed to get the current locale\n");
/* 'save' may be pointing at a modifiable scratch variable, so copy it. */
save = pg_strdup(save);
res = setlocale(category, locale);
if (!res)
- pg_log(PG_FATAL, "failed to get system local name for \"%s\"\n", res);
+ pg_log(PG_FATAL, "failed to get system local name for \"%s\"\n", res);
res = pg_strdup(res);
/* restore old value. */
if (!setlocale(category, save))
- pg_log(PG_FATAL, "failed to restore old locale \"%s\"\n", save);
+ pg_log(PG_FATAL, "failed to restore old locale \"%s\"\n", save);
pg_free(save);
pg_free(lc_messages);
/*
- * Before 9.3, pg_resetxlog reported the xlogid and segno of the first
- * log file after reset as separate lines. Starting with 9.3, it reports
- * the WAL file name. If the old cluster is older than 9.3, we construct
- * the WAL file name from the xlogid and segno.
+ * Before 9.3, pg_resetxlog reported the xlogid and segno of the first log
+ * file after reset as separate lines. Starting with 9.3, it reports the
+ * WAL file name. If the old cluster is older than 9.3, we construct the
+ * WAL file name from the xlogid and segno.
*/
if (GET_MAJOR_VERSION(cluster->major_version) <= 902)
{
!got_date_is_int || !got_float8_pass_by_value || !got_data_checksum_version)
{
pg_log(PG_REPORT,
- "The %s cluster lacks some required control information:\n",
- CLUSTER_NAME(cluster));
+ "The %s cluster lacks some required control information:\n",
+ CLUSTER_NAME(cluster));
if (!got_xid)
pg_log(PG_REPORT, " checkpoint next XID\n");
{
if (oldctrl->align == 0 || oldctrl->align != newctrl->align)
pg_log(PG_FATAL,
- "old and new pg_controldata alignments are invalid or do not match\n"
+ "old and new pg_controldata alignments are invalid or do not match\n"
"Likely one cluster is a 32-bit install, the other 64-bit\n");
if (oldctrl->blocksz == 0 || oldctrl->blocksz != newctrl->blocksz)
"options.\n");
}
- /* We might eventually allow upgrades from checksum to no-checksum clusters. */
+ /*
+ * We might eventually allow upgrades from checksum to no-checksum
+ * clusters.
+ */
if (oldctrl->data_checksum_version != newctrl->data_checksum_version)
{
pg_log(PG_FATAL,
{
int result;
int written;
+
#define MAXCMDLEN (2 * MAXPGPATH)
char cmd[MAXCMDLEN];
mode_t old_umask = 0;
#ifdef WIN32
{
- /*
- * "pg_ctl -w stop" might have reported that the server has
- * stopped because the postmaster.pid file has been removed,
- * but "pg_ctl -w start" might still be in the process of
- * closing and might still be holding its stdout and -l log
- * file descriptors open. Therefore, try to open the log
- * file a few more times.
+ /*
+ * "pg_ctl -w stop" might have reported that the server has stopped
+ * because the postmaster.pid file has been removed, but "pg_ctl -w
+ * start" might still be in the process of closing and might still be
+ * holding its stdout and -l log file descriptors open. Therefore,
+ * try to open the log file a few more times.
*/
- int iter;
+ int iter;
+
for (iter = 0; iter < 4 && log == NULL; iter++)
{
sleep(1);
}
#ifndef WIN32
- /*
- * We can't do this on Windows because it will keep the "pg_ctl start"
- * output filename open until the server stops, so we do the \n\n above
- * on that platform. We use a unique filename for "pg_ctl start" that is
- * never reused while the server is running, so it works fine. We could
- * log these commands to a third file, but that just adds complexity.
+
+ /*
+ * We can't do this on Windows because it will keep the "pg_ctl start"
+ * output filename open until the server stops, so we do the \n\n above on
+ * that platform. We use a unique filename for "pg_ctl start" that is
+ * never reused while the server is running, so it works fine. We could
+ * log these commands to a third file, but that just adds complexity.
*/
if ((log = fopen_priv(log_file, "a")) == NULL)
pg_log(PG_FATAL, "cannot write to log file %s\n", log_file);
void
verify_directories(void)
{
-
#ifndef WIN32
if (access(".", R_OK | W_OK | X_OK) != 0)
#else
static int
copy_file(const char *srcfile, const char *dstfile, bool force)
{
-
#define COPY_BUF_SIZE (50 * BLCKSZ)
int src_fd;
int dest_fd;
char *buffer;
int ret = 0;
- int save_errno = 0;
+ int save_errno = 0;
if ((srcfile == NULL) || (dstfile == NULL))
return -1;
* table names change during ALTER TABLE ALTER COLUMN SET TYPE. In >=
* 9.0, TOAST relation names always use heap table oids, hence we
* cannot check relation names when upgrading from pre-9.0. Clusters
- * upgraded to 9.0 will get matching TOAST names.
- * If index names don't match primary key constraint names, this will
- * fail because pg_dump dumps constraint names and pg_upgrade checks
- * index names.
+ * upgraded to 9.0 will get matching TOAST names. If index names don't
+ * match primary key constraint names, this will fail because pg_dump
+ * dumps constraint names and pg_upgrade checks index names.
*/
if (strcmp(old_rel->nspname, new_rel->nspname) != 0 ||
((GET_MAJOR_VERSION(old_cluster.major_version) >= 900 ||
num_maps++;
}
- /* Do this check after the loop so hopefully we will produce a clearer error above */
+ /*
+ * Do this check after the loop so hopefully we will produce a clearer
+ * error above
+ */
if (old_db->rel_arr.nrels != new_db->rel_arr.nrels)
pg_log(PG_FATAL, "old and new databases \"%s\" have a different number of relations\n",
old_db->db_name);
"LEFT OUTER JOIN pg_catalog.pg_index i "
" ON c.oid = i.indexrelid "
"WHERE relkind IN ('r', 'm', 'i'%s) AND "
- /* pg_dump only dumps valid indexes; testing indisready is
- * necessary in 9.2, and harmless in earlier/later versions. */
+
+ /*
+ * pg_dump only dumps valid indexes; testing indisready is necessary in
+ * 9.2, and harmless in earlier/later versions.
+ */
" i.indisvalid IS DISTINCT FROM false AND "
" i.indisready IS DISTINCT FROM false AND "
/* exclude possible orphaned temp tables */
PQclear(executeQueryOrDie(conn, "%s", query));
/*
- * Get TOAST tables and indexes; we have to gather the TOAST tables in
- * later steps because we can't schema-qualify TOAST tables.
+ * Get TOAST tables and indexes; we have to gather the TOAST tables in
+ * later steps because we can't schema-qualify TOAST tables.
*/
PQclear(executeQueryOrDie(conn,
"INSERT INTO info_rels "
/* we preserve pg_class.oid so we sort by it to match old/new */
"ORDER BY 1;",
/* 9.2 removed the spclocation column */
- (GET_MAJOR_VERSION(cluster->major_version) <= 901) ?
- "t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid) AS spclocation");
+ (GET_MAJOR_VERSION(cluster->major_version) <= 901) ?
+ "t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid) AS spclocation");
res = executeQueryOrDie(conn, "%s", query);
for (relnum = 0; relnum < rel_arr->nrels; relnum++)
pg_log(PG_VERBOSE, "relname: %s.%s: reloid: %u reltblspace: %s\n",
rel_arr->rels[relnum].nspname, rel_arr->rels[relnum].relname,
- rel_arr->rels[relnum].reloid, rel_arr->rels[relnum].tablespace);
+ rel_arr->rels[relnum].reloid, rel_arr->rels[relnum].tablespace);
}
}
/*
- * Trim off any trailing path separators because we construct paths
- * by appending to this path.
+ * Trim off any trailing path separators because we construct paths by
+ * appending to this path.
*/
#ifndef WIN32
if ((*dirpath)[strlen(*dirpath) - 1] == '/')
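/*
 * Editor's sketch (not part of this patch; hypothetical helper): the
 * trailing-separator trim described above, so later "%s/%s" style
 * concatenations don't produce doubled separators.
 */
#include <string.h>

static void
trim_trailing_separator(char *dirpath)
{
	size_t		len = strlen(dirpath);

	if (len > 1 && (dirpath[len - 1] == '/'
#ifdef WIN32
					|| dirpath[len - 1] == '\\'
#endif
					))
		dirpath[len - 1] = '\0';
}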
get_sock_dir(ClusterInfo *cluster, bool live_check)
{
#ifdef HAVE_UNIX_SOCKETS
+
/*
- * sockdir and port were added to postmaster.pid in PG 9.1.
- * Pre-9.1 cannot process pg_ctl -w for sockets in non-default
- * locations.
+ * sockdir and port were added to postmaster.pid in PG 9.1. Pre-9.1 cannot
+ * process pg_ctl -w for sockets in non-default locations.
*/
if (GET_MAJOR_VERSION(cluster->major_version) >= 901)
{
else
{
/*
- * If we are doing a live check, we will use the old cluster's Unix
- * domain socket directory so we can connect to the live server.
+ * If we are doing a live check, we will use the old cluster's
+ * Unix domain socket directory so we can connect to the live
+ * server.
*/
unsigned short orig_port = cluster->port;
- char filename[MAXPGPATH], line[MAXPGPATH];
- FILE *fp;
+ char filename[MAXPGPATH],
+ line[MAXPGPATH];
+ FILE *fp;
int lineno;
-
+
snprintf(filename, sizeof(filename), "%s/postmaster.pid",
cluster->pgdata);
if ((fp = fopen(filename, "r")) == NULL)
pg_log(PG_FATAL, "Cannot open file %s: %m\n", filename);
-
+
for (lineno = 1;
- lineno <= Max(LOCK_FILE_LINE_PORT, LOCK_FILE_LINE_SOCKET_DIR);
+ lineno <= Max(LOCK_FILE_LINE_PORT, LOCK_FILE_LINE_SOCKET_DIR);
lineno++)
{
if (fgets(line, sizeof(line), fp) == NULL)
pg_log(PG_FATAL, "Cannot read line %d from %s: %m\n", lineno, filename);
-
+
/* potentially overwrite user-supplied value */
if (lineno == LOCK_FILE_LINE_PORT)
sscanf(line, "%hu", &old_cluster.port);
}
}
fclose(fp);
-
+
/* warn of port number correction */
if (orig_port != DEF_PGUPORT && old_cluster.port != orig_port)
pg_log(PG_WARNING, "User-supplied old port number %hu corrected to %hu\n",
- orig_port, cluster->port);
+ orig_port, cluster->port);
}
}
else
- /* Can't get sockdir and pg_ctl -w can't use a non-default, use default */
- cluster->sockdir = NULL;
-#else /* !HAVE_UNIX_SOCKETS */
+ /*
+ * Can't get the sockdir, and pg_ctl -w can't use a non-default one,
+ * so use the default.
+ */
+ cluster->sockdir = NULL;
+#else /* !HAVE_UNIX_SOCKETS */
cluster->sockdir = NULL;
#endif
}
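/*
 * Editor's sketch (not part of this patch; hypothetical helper): the
 * postmaster.pid scan above, generalized.  Lines are 1-based; the
 * caller passes LOCK_FILE_LINE_PORT or LOCK_FILE_LINE_SOCKET_DIR.
 * MAXPGPATH and strlcpy come from the surrounding sources.
 */
static bool
read_pid_file_line(const char *pidpath, int target_lineno,
				   char *out, size_t outlen)
{
	FILE	   *fp;
	int			lineno;
	char		line[MAXPGPATH];

	if ((fp = fopen(pidpath, "r")) == NULL)
		return false;
	for (lineno = 1; lineno <= target_lineno; lineno++)
	{
		if (fgets(line, sizeof(line), fp) == NULL)
		{
			fclose(fp);
			return false;		/* file has fewer lines than expected */
		}
	}
	fclose(fp);
	strlcpy(out, line, outlen);
	return true;
}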
if (newPageVersion != oldPageVersion)
{
/*
- * The clusters use differing page layouts, see if we can find a plugin
- * that knows how to convert from the old page layout to the new page
- * layout.
+ * The clusters use differing page layouts, see if we can find a
+ * plugin that knows how to convert from the old page layout to the
+ * new page layout.
*/
-
+
if ((converter = loadConverterPlugin(newPageVersion, oldPageVersion)) == NULL)
pg_log(PG_FATAL, "could not find plugin to convert from old page layout to new page layout\n");
}
}
-
-
#endif
#include <io.h>
#endif
-static int parallel_jobs;
+static int parallel_jobs;
#ifdef WIN32
/*
* it can be passed to WaitForMultipleObjects(). We use two arrays
* so the thread_handles array can be passed to WaitForMultipleObjects().
*/
-HANDLE *thread_handles;
+HANDLE *thread_handles;
-typedef struct {
- char log_file[MAXPGPATH];
- char opt_log_file[MAXPGPATH];
- char cmd[MAX_STRING];
+typedef struct
+{
+ char log_file[MAXPGPATH];
+ char opt_log_file[MAXPGPATH];
+ char cmd[MAX_STRING];
} exec_thread_arg;
-typedef struct {
- DbInfoArr *old_db_arr;
- DbInfoArr *new_db_arr;
- char old_pgdata[MAXPGPATH];
- char new_pgdata[MAXPGPATH];
- char old_tablespace[MAXPGPATH];
+typedef struct
+{
+ DbInfoArr *old_db_arr;
+ DbInfoArr *new_db_arr;
+ char old_pgdata[MAXPGPATH];
+ char new_pgdata[MAXPGPATH];
+ char old_tablespace[MAXPGPATH];
} transfer_thread_arg;
exec_thread_arg **exec_thread_args;
transfer_thread_arg **transfer_thread_args;
/* track current thread_args struct so reap_child() can be used for all cases */
-void **cur_thread_args;
-
-DWORD win32_exec_prog(exec_thread_arg *args);
-DWORD win32_transfer_all_new_dbs(transfer_thread_arg *args);
+void **cur_thread_args;
+DWORD win32_exec_prog(exec_thread_arg *args);
+DWORD win32_transfer_all_new_dbs(transfer_thread_arg *args);
#endif
/*
{
va_list args;
char cmd[MAX_STRING];
+
#ifndef WIN32
pid_t child;
#else
HANDLE child;
- exec_thread_arg *new_arg;
+ exec_thread_arg *new_arg;
#endif
va_start(args, fmt);
{
/* parallel */
#ifdef WIN32
- cur_thread_args = (void **)exec_thread_args;
-#endif
+ cur_thread_args = (void **) exec_thread_args;
+#endif
/* harvest any dead children */
while (reap_child(false) == true)
;
/* must we wait for a dead child? */
if (parallel_jobs >= user_opts.jobs)
reap_child(true);
-
+
/* set this before we start the job */
parallel_jobs++;
-
+
/* Ensure stdio state is quiesced before forking */
fflush(NULL);
#else
if (thread_handles == NULL)
{
- int i;
+ int i;
thread_handles = pg_malloc(user_opts.jobs * sizeof(HANDLE));
exec_thread_args = pg_malloc(user_opts.jobs * sizeof(exec_thread_arg *));
/*
- * For safety and performance, we keep the args allocated during
- * the entire life of the process, and we don't free the args
- * in a thread different from the one that allocated it.
+ * For safety and performance, we keep the args allocated during
+ * the entire life of the process, and we don't free the args in a
+ * thread different from the one that allocated it.
*/
for (i = 0; i < user_opts.jobs; i++)
exec_thread_args[i] = pg_malloc(sizeof(exec_thread_arg));
}
/* use first empty array element */
- new_arg = exec_thread_args[parallel_jobs-1];
+ new_arg = exec_thread_args[parallel_jobs - 1];
/* Can only pass one pointer into the function, so use a struct */
strcpy(new_arg->log_file, log_file);
strcpy(new_arg->cmd, cmd);
child = (HANDLE) _beginthreadex(NULL, 0, (void *) win32_exec_prog,
- new_arg, 0, NULL);
+ new_arg, 0, NULL);
if (child == 0)
pg_log(PG_FATAL, "could not create worker thread: %s\n", strerror(errno));
- thread_handles[parallel_jobs-1] = child;
+ thread_handles[parallel_jobs - 1] = child;
#endif
}
DWORD
win32_exec_prog(exec_thread_arg *args)
{
- int ret;
+ int ret;
ret = !exec_prog(args->log_file, args->opt_log_file, true, "%s", args->cmd);
* This has the same API as transfer_all_new_dbs, except it does parallel execution
* by transferring multiple tablespaces in parallel
*/
-void parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
- char *old_pgdata, char *new_pgdata,
- char *old_tablespace)
+void
+parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
+ char *old_pgdata, char *new_pgdata,
+ char *old_tablespace)
{
#ifndef WIN32
pid_t child;
#else
HANDLE child;
- transfer_thread_arg *new_arg;
+ transfer_thread_arg *new_arg;
#endif
if (user_opts.jobs <= 1)
{
/* parallel */
#ifdef WIN32
- cur_thread_args = (void **)transfer_thread_args;
+ cur_thread_args = (void **) transfer_thread_args;
#endif
/* harvest any dead children */
while (reap_child(false) == true)
/* must we wait for a dead child? */
if (parallel_jobs >= user_opts.jobs)
reap_child(true);
-
+
/* set this before we start the job */
parallel_jobs++;
-
+
/* Ensure stdio state is quiesced before forking */
fflush(NULL);
#else
if (thread_handles == NULL)
{
- int i;
+ int i;
thread_handles = pg_malloc(user_opts.jobs * sizeof(HANDLE));
transfer_thread_args = pg_malloc(user_opts.jobs * sizeof(transfer_thread_arg *));
/*
- * For safety and performance, we keep the args allocated during
- * the entire life of the process, and we don't free the args
- * in a thread different from the one that allocated it.
+ * For safety and performance, we keep the args allocated during
+ * the entire life of the process, and we don't free the args in a
+ * thread different from the one that allocated it.
*/
for (i = 0; i < user_opts.jobs; i++)
transfer_thread_args[i] = pg_malloc(sizeof(transfer_thread_arg));
}
/* use first empty array element */
- new_arg = transfer_thread_args[parallel_jobs-1];
+ new_arg = transfer_thread_args[parallel_jobs - 1];
/* Can only pass one pointer into the function, so use a struct */
new_arg->old_db_arr = old_db_arr;
strcpy(new_arg->old_tablespace, old_tablespace);
child = (HANDLE) _beginthreadex(NULL, 0, (void *) win32_exec_prog,
- new_arg, 0, NULL);
+ new_arg, 0, NULL);
if (child == 0)
pg_log(PG_FATAL, "could not create worker thread: %s\n", strerror(errno));
- thread_handles[parallel_jobs-1] = child;
+ thread_handles[parallel_jobs - 1] = child;
#endif
}
reap_child(bool wait_for_child)
{
#ifndef WIN32
- int work_status;
- int ret;
+ int work_status;
+ int ret;
#else
- int thread_num;
- DWORD res;
+ int thread_num;
+ DWORD res;
#endif
if (user_opts.jobs <= 1 || parallel_jobs == 0)
if (WEXITSTATUS(work_status) != 0)
pg_log(PG_FATAL, "child worker exited abnormally: %s\n", strerror(errno));
-
#else
/* wait for one to finish */
thread_num = WaitForMultipleObjects(parallel_jobs, thread_handles,
- false, wait_for_child ? INFINITE : 0);
+ false, wait_for_child ? INFINITE : 0);
if (thread_num == WAIT_TIMEOUT || thread_num == WAIT_FAILED)
return false;
/* compute thread index in active_threads */
thread_num -= WAIT_OBJECT_0;
-
+
/* get the result */
GetExitCodeThread(thread_handles[thread_num], &res);
if (res != 0)
/* dispose of handle to stop leaks */
CloseHandle(thread_handles[thread_num]);
- /* Move last slot into dead child's position */
+ /* Move last slot into dead child's position */
if (thread_num != parallel_jobs - 1)
{
- void *tmp_args;
-
+ void *tmp_args;
+
thread_handles[thread_num] = thread_handles[parallel_jobs - 1];
/*
- * We must swap the arg struct pointers because the thread we
- * just moved is active, and we must make sure it is not
- * reused by the next created thread. Instead, the new thread
- * will use the arg struct of the thread that just died.
+ * We must swap the arg struct pointers because the thread we just
+ * moved is active, and we must make sure it is not reused by the next
+ * created thread. Instead, the new thread will use the arg struct of
+ * the thread that just died.
*/
tmp_args = cur_thread_args[thread_num];
cur_thread_args[thread_num] = cur_thread_args[parallel_jobs - 1];
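/*
 * Editor's sketch (not part of this patch): the slot-compaction step
 * above in isolation.  The dead slot receives the last live thread's
 * handle, and the arg-struct pointers are swapped rather than copied:
 * the moved thread keeps the args it is still using, and the next
 * thread created reuses the dead thread's args.
 */
static void
compact_thread_slots(HANDLE *handles, void **args, int dead, int nthreads)
{
	if (dead != nthreads - 1)
	{
		void	   *tmp_args = args[dead];

		handles[dead] = handles[nthreads - 1];
		args[dead] = args[nthreads - 1];
		args[nthreads - 1] = tmp_args;
	}
}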
disable_old_cluster();
transfer_all_new_tablespaces(&old_cluster.dbarr, &new_cluster.dbarr,
- old_cluster.pgdata, new_cluster.pgdata);
+ old_cluster.pgdata, new_cluster.pgdata);
/*
* Assuming OIDs are only used in system tables, there is no need to
if (pid_lock_file_exists(old_cluster.pgdata))
{
/*
- * If we have a postmaster.pid file, try to start the server. If
- * it starts, the pid file was stale, so stop the server. If it
- * doesn't start, assume the server is running. If the pid file
- * is left over from a server crash, this also allows any committed
- * transactions stored in the WAL to be replayed so they are not
- * lost, because WAL files are not transfered from old to new
- * servers.
- */
+ * If we have a postmaster.pid file, try to start the server. If it
+ * starts, the pid file was stale, so stop the server. If it doesn't
+ * start, assume the server is running. If the pid file is left over
+ * from a server crash, this also allows any committed transactions
+ * stored in the WAL to be replayed so they are not lost, because WAL
+ * files are not transferred from old to new servers.
+ */
if (start_postmaster(&old_cluster, false))
stop_postmaster(false);
else
stop_postmaster(false);
else
pg_log(PG_FATAL, "There seems to be a postmaster servicing the new cluster.\n"
- "Please shutdown that postmaster and try again.\n");
+ "Please shutdown that postmaster and try again.\n");
}
/* get path to pg_upgrade executable */
prep_status("Adding support functions to new cluster");
/*
- * Technically, we only need to install these support functions in new
- * databases that also exist in the old cluster, but for completeness
- * we process all new databases.
+ * Technically, we only need to install these support functions in new
+ * databases that also exist in the old cluster, but for completeness we
+ * process all new databases.
*/
for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++)
{
for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
{
- char sql_file_name[MAXPGPATH], log_file_name[MAXPGPATH];
- DbInfo *old_db = &old_cluster.dbarr.dbs[dbnum];
+ char sql_file_name[MAXPGPATH],
+ log_file_name[MAXPGPATH];
+ DbInfo *old_db = &old_cluster.dbarr.dbs[dbnum];
pg_log(PG_STATUS, "%s", old_db->db_name);
snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);
/*
- * pg_dump only produces its output at the end, so there is little
- * parallelism if using the pipe.
+ * pg_dump only produces its output at the end, so there is little
+ * parallelism if using the pipe.
*/
parallel_exec_prog(log_file_name, NULL,
- "\"%s/pg_restore\" %s --exit-on-error --verbose --dbname \"%s\" \"%s\"",
- new_cluster.bindir, cluster_conn_opts(&new_cluster),
- old_db->db_name, sql_file_name);
+ "\"%s/pg_restore\" %s --exit-on-error --verbose --dbname \"%s\" \"%s\"",
+ new_cluster.bindir, cluster_conn_opts(&new_cluster),
+ old_db->db_name, sql_file_name);
}
/* reap all children */
copy_subdir_files("pg_multixact/offsets");
copy_subdir_files("pg_multixact/members");
prep_status("Setting next multixact ID and offset for new cluster");
+
/*
* we preserve all files and contents, so we must preserve both "next"
* counters here and the oldest multi present on system.
else if (new_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER)
{
prep_status("Setting oldest multixact ID on new cluster");
+
/*
* We don't preserve files in this case, but it's important that the
* oldest multi is set to the latest value used by the old system, so
static void
cleanup(void)
{
-
fclose(log_opts.internal);
/* Remove dump and log files? */
if (old_cluster.dbarr.dbs)
for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
{
- char sql_file_name[MAXPGPATH], log_file_name[MAXPGPATH];
- DbInfo *old_db = &old_cluster.dbarr.dbs[dbnum];
+ char sql_file_name[MAXPGPATH],
+ log_file_name[MAXPGPATH];
+ DbInfo *old_db = &old_cluster.dbarr.dbs[dbnum];
snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
unlink(sql_file_name);
#define pg_copy_file copy_file
#define pg_mv_file rename
#define pg_link_file link
-#define PATH_SEPARATOR '/'
+#define PATH_SEPARATOR '/'
#define RM_CMD "rm -f"
#define RMDIR_CMD "rm -rf"
#define SCRIPT_EXT "sh"
#define ECHO_QUOTE "'"
-#define ECHO_BLANK ""
+#define ECHO_BLANK ""
#else
#define pg_copy_file CopyFile
#define pg_mv_file pgrename
#define pg_link_file win32_pghardlink
#define sleep(x) Sleep(x * 1000)
-#define PATH_SEPARATOR '\\'
+#define PATH_SEPARATOR '\\'
#define RM_CMD "DEL /q"
#define RMDIR_CMD "RMDIR /s/q"
#define SCRIPT_EXT "bat"
#define EXE_EXT ".exe"
#define ECHO_QUOTE ""
-#define ECHO_BLANK "."
+#define ECHO_BLANK "."
#endif
#define CLUSTER_NAME(cluster) ((cluster) == &old_cluster ? "old" : \
typedef struct
{
/* Can't use NAMEDATALEN; not guaranteed to fit on client */
- char *nspname; /* namespace name */
- char *relname; /* relation name */
+ char *nspname; /* namespace name */
+ char *relname; /* relation name */
Oid reloid; /* relation oid */
Oid relfilenode; /* relation relfile node */
/* relation tablespace path, or "" for the cluster default */
Oid old_relfilenode;
Oid new_relfilenode;
/* the rest are used only for logging and error reporting */
- char *nspname; /* namespaces */
- char *relname;
+ char *nspname; /* namespaces */
+ char *relname;
} FileNameMap;
/*
typedef struct
{
Oid db_oid; /* oid of the database */
- char *db_name; /* database name */
+ char *db_name; /* database name */
char db_tblspace[MAXPGPATH]; /* database default tablespace path */
RelInfoArr rel_arr; /* array of all user relinfos */
} DbInfo;
char major_version_str[64]; /* string PG_VERSION of cluster */
uint32 bin_version; /* version returned from pg_ctl */
Oid pg_database_oid; /* OID of pg_database relation */
- Oid install_role_oid; /* OID of connected role */
- Oid role_count; /* number of roles defined in the cluster */
+ Oid install_role_oid; /* OID of connected role */
+ Oid role_count; /* number of roles defined in the cluster */
char *tablespace_suffix; /* directory specification */
} ClusterInfo;
/* check.c */
void output_check_banner(bool live_check);
-void check_and_dump_old_cluster(bool live_check,
- char **sequence_script_file_name);
+void check_and_dump_old_cluster(bool live_check,
+ char **sequence_script_file_name);
void check_new_cluster(void);
void report_clusters_compatible(void);
void issue_warnings(char *sequence_script_file_name);
-void output_completion_banner(char *analyze_script_file_name,
+void output_completion_banner(char *analyze_script_file_name,
char *deletion_script_file_name);
void check_cluster_versions(void);
void check_cluster_compatibility(bool live_check);
/* relfilenode.c */
void get_pg_database_relfilenode(ClusterInfo *cluster);
-void transfer_all_new_tablespaces(DbInfoArr *old_db_arr,
- DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata);
-void transfer_all_new_dbs(DbInfoArr *old_db_arr,
+void transfer_all_new_tablespaces(DbInfoArr *old_db_arr,
+ DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata);
+void transfer_all_new_dbs(DbInfoArr *old_db_arr,
DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata,
- char *old_tablespace);
+ char *old_tablespace);
/* tablespace.c */
char *old_8_3_create_sequence_script(ClusterInfo *cluster);
/* parallel.c */
-void parallel_exec_prog(const char *log_file, const char *opt_log_file,
- const char *fmt,...)
+void
+parallel_exec_prog(const char *log_file, const char *opt_log_file,
+ const char *fmt,...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 3, 4)));
-void parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
- char *old_pgdata, char *new_pgdata,
- char *old_tablespace);
+void parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
+ char *old_pgdata, char *new_pgdata,
+ char *old_tablespace);
bool reap_child(bool wait_for_child);
-
static void transfer_single_new_db(pageCnvCtx *pageConverter,
FileNameMap *maps, int size, char *old_tablespace);
static void transfer_relfile(pageCnvCtx *pageConverter, FileNameMap *map,
- const char *suffix);
+ const char *suffix);
/*
*/
void
transfer_all_new_tablespaces(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
- char *old_pgdata, char *new_pgdata)
+ char *old_pgdata, char *new_pgdata)
{
pg_log(PG_REPORT, "%s user relation files\n",
user_opts.transfer_mode == TRANSFER_MODE_LINK ? "Linking" : "Copying");
/*
- * Transfering files by tablespace is tricky because a single database
- * can use multiple tablespaces. For non-parallel mode, we just pass a
- * NULL tablespace path, which matches all tablespaces. In parallel mode,
- * we pass the default tablespace and all user-created tablespaces
- * and let those operations happen in parallel.
+ * Transferring files by tablespace is tricky because a single database can
+ * use multiple tablespaces. For non-parallel mode, we just pass a NULL
+ * tablespace path, which matches all tablespaces. In parallel mode, we
+ * pass the default tablespace and all user-created tablespaces and let
+ * those operations happen in parallel.
*/
if (user_opts.jobs <= 1)
parallel_transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata,
new_pgdata, NULL);
else
{
- int tblnum;
+ int tblnum;
/* transfer default tablespace */
parallel_transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata,
- new_pgdata, old_pgdata);
+ new_pgdata, old_pgdata);
for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
parallel_transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata,
- new_pgdata, os_info.old_tablespaces[tblnum]);
+ new_pgdata, os_info.old_tablespaces[tblnum]);
/* reap all children */
while (reap_child(true) == true)
;
*/
void
transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
- char *old_pgdata, char *new_pgdata, char *old_tablespace)
+ char *old_pgdata, char *new_pgdata, char *old_tablespace)
{
int old_dbnum,
new_dbnum;
{
int mapnum;
bool vm_crashsafe_match = true;
-
+
/*
* Do the old and new cluster disagree on the crash-safety of the vm
- * files? If so, do not copy them.
- */
+ * files? If so, do not copy them.
+ */
if (old_cluster.controldata.cat_ver < VISIBILITY_MAP_CRASHSAFE_CAT_VER &&
new_cluster.controldata.cat_ver >= VISIBILITY_MAP_CRASHSAFE_CAT_VER)
vm_crashsafe_match = false;
{
/* transfer primary file */
transfer_relfile(pageConverter, &maps[mapnum], "");
-
+
/* fsm/vm files added in PG 8.4 */
if (GET_MAJOR_VERSION(old_cluster.major_version) >= 804)
{
int fd;
int segno;
char extent_suffix[65];
-
+
/*
- * Now copy/link any related segments as well. Remember, PG breaks
- * large files into 1GB segments, the first segment has no extension,
- * subsequent segments are named relfilenode.1, relfilenode.2,
- * relfilenode.3.
- * copied.
+ * Now copy/link any related segments as well. Remember, PG breaks large
+ * files into 1GB segments, the first segment has no extension, subsequent
+ * segments are named relfilenode.1, relfilenode.2, relfilenode.3.
*/
for (segno = 0;; segno++)
{
snprintf(extent_suffix, sizeof(extent_suffix), ".%d", segno);
snprintf(old_file, sizeof(old_file), "%s%s/%u/%u%s%s", map->old_tablespace,
- map->old_tablespace_suffix, map->old_db_oid, map->old_relfilenode,
+ map->old_tablespace_suffix, map->old_db_oid, map->old_relfilenode,
type_suffix, extent_suffix);
snprintf(new_file, sizeof(new_file), "%s%s/%u/%u%s%s", map->new_tablespace,
- map->new_tablespace_suffix, map->new_db_oid, map->new_relfilenode,
+ map->new_tablespace_suffix, map->new_db_oid, map->new_relfilenode,
type_suffix, extent_suffix);
-
+
/* Is it an extent, fsm, or vm file? */
if (type_suffix[0] != '\0' || segno != 0)
{
}
unlink(new_file);
-
+
/* Copying files might take some time, so give feedback. */
pg_log(PG_STATUS, "%s", old_file);
-
+
if ((user_opts.transfer_mode == TRANSFER_MODE_LINK) && (pageConverter != NULL))
pg_log(PG_FATAL, "This upgrade requires page-by-page conversion, "
"you must use copy mode instead of link mode.\n");
-
+
if (user_opts.transfer_mode == TRANSFER_MODE_COPY)
{
pg_log(PG_VERBOSE, "copying \"%s\" to \"%s\"\n", old_file, new_file);
-
+
if ((msg = copyAndUpdateFile(pageConverter, old_file, new_file, true)) != NULL)
pg_log(PG_FATAL, "error while copying relation \"%s.%s\" (\"%s\" to \"%s\"): %s\n",
map->nspname, map->relname, old_file, new_file, msg);
else
{
pg_log(PG_VERBOSE, "linking \"%s\" to \"%s\"\n", old_file, new_file);
-
+
if ((msg = linkAndUpdateFile(pageConverter, old_file, new_file)) != NULL)
pg_log(PG_FATAL,
"error while creating link for relation \"%s.%s\" (\"%s\" to \"%s\"): %s\n",
map->nspname, map->relname, old_file, new_file, msg);
}
- }
+ }
return;
}
-
char *
cluster_conn_opts(ClusterInfo *cluster)
{
- static char conn_opts[MAXPGPATH + NAMEDATALEN + 100];
+ static char conn_opts[MAXPGPATH + NAMEDATALEN + 100];
if (cluster->sockdir)
snprintf(conn_opts, sizeof(conn_opts),
strcat(socket_string,
" -c listen_addresses='' -c unix_socket_permissions=0700");
- /* Have a sockdir? Tell the postmaster. */
+ /* Have a sockdir? Tell the postmaster. */
if (cluster->sockdir)
snprintf(socket_string + strlen(socket_string),
sizeof(socket_string) - strlen(socket_string),
* win on ext4.
*/
snprintf(cmd, sizeof(cmd),
- "\"%s/pg_ctl\" -w -l \"%s\" -D \"%s\" -o \"-p %d%s%s %s%s\" start",
+ "\"%s/pg_ctl\" -w -l \"%s\" -D \"%s\" -o \"-p %d%s%s %s%s\" start",
cluster->bindir, SERVER_LOG_FILE, cluster->pgconfig, cluster->port,
(cluster->controldata.cat_ver >=
BINARY_UPGRADE_SERVER_FLAG_CAT_VER) ? " -b" :
" -c autovacuum=off -c autovacuum_freeze_max_age=2000000000",
(cluster == &new_cluster) ?
- " -c synchronous_commit=off -c fsync=off -c full_page_writes=off" : "",
+ " -c synchronous_commit=off -c fsync=off -c full_page_writes=off" : "",
cluster->pgopts ? cluster->pgopts : "", socket_string);
/*
* it might supply a reason for the failure.
*/
pg_ctl_return = exec_prog(SERVER_START_LOG_FILE,
- /* pass both file names if they differ */
+ /* pass both file names if they differ */
(strcmp(SERVER_LOG_FILE,
SERVER_START_LOG_FILE) != 0) ?
SERVER_LOG_FILE : NULL,
if (!pg_ctl_return && !throw_error)
return false;
-
+
/* Check to see if we can connect to the server; if not, report it. */
if ((conn = get_db_conn(cluster, "template1")) == NULL ||
PQstatus(conn) != CONNECTION_OK)
if ((os_info.num_old_tablespaces = PQntuples(res)) != 0)
os_info.old_tablespaces = (char **) pg_malloc(
- os_info.num_old_tablespaces * sizeof(char *));
+ os_info.num_old_tablespaces * sizeof(char *));
else
os_info.old_tablespaces = NULL;
end_progress_output(void)
{
/*
- * In case nothing printed; pass a space so gcc doesn't complain about
- * empty format string.
+ * In case nothing printed; pass a space so gcc doesn't complain about
+ * empty format string.
*/
prep_status(" ");
}
/* for output to a display, do leading truncation and append \r */
if (isatty(fileno(stdout)))
/* -2 because we use a 2-space indent */
- printf(" %s%-*.*s\r",
- /* prefix with "..." if we do leading truncation */
- strlen(message) <= MESSAGE_WIDTH - 2 ? "" : "...",
- MESSAGE_WIDTH - 2, MESSAGE_WIDTH - 2,
- /* optional leading truncation */
- strlen(message) <= MESSAGE_WIDTH - 2 ? message :
- message + strlen(message) - MESSAGE_WIDTH + 3 + 2);
+ printf(" %s%-*.*s\r",
+ /* prefix with "..." if we do leading truncation */
+ strlen(message) <= MESSAGE_WIDTH - 2 ? "" : "...",
+ MESSAGE_WIDTH - 2, MESSAGE_WIDTH - 2,
+ /* optional leading truncation */
+ strlen(message) <= MESSAGE_WIDTH - 2 ? message :
+ message + strlen(message) - MESSAGE_WIDTH + 3 + 2);
else
printf(" %s\n", _(message));
break;
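/*
 * Editor's sketch (not part of this patch; MESSAGE_WIDTH value assumed):
 * the leading-truncation arithmetic above.  A message longer than
 * MESSAGE_WIDTH - 2 columns keeps only its last MESSAGE_WIDTH - 5
 * characters, leaving room for the "..." marker and the 2-space indent.
 */
#include <stdio.h>
#include <string.h>

#define MESSAGE_WIDTH 60

static void
print_truncated(const char *message)
{
	size_t		len = strlen(message);

	if (len <= MESSAGE_WIDTH - 2)
		printf("  %-*.*s\r", MESSAGE_WIDTH - 2, MESSAGE_WIDTH - 2, message);
	else						/* show the tail, flag truncation with "..." */
		printf("  ...%-*.*s\r", MESSAGE_WIDTH - 2, MESSAGE_WIDTH - 2,
			   message + len - (MESSAGE_WIDTH - 5));
}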
/*
* Stopgap implementation of timestamptz_to_str that doesn't depend on backend
- * infrastructure. This will work for timestamps that are within the range
+ * infrastructure. This will work for timestamps that are within the range
* of the platform time_t type. (pg_time_t is compatible except for possibly
* being wider.)
*
* be linked/called.
*/
void
-appendStringInfo(StringInfo str, const char *fmt, ...)
+appendStringInfo(StringInfo str, const char *fmt,...)
{
va_list args;
static void
print_rmgr_list(void)
{
- int i;
+ int i;
for (i = 0; i < RM_MAX_ID + 1; i++)
{
static bool
verify_directory(const char *directory)
{
- DIR *dir = opendir(directory);
+ DIR *dir = opendir(directory);
+
if (dir == NULL)
return false;
closedir(dir);
if (sep != NULL)
{
*dir = pg_strdup(path);
- (*dir)[(sep - path) + 1] = '\0'; /* no strndup */
+ (*dir)[(sep - path) + 1] = '\0'; /* no strndup */
*fname = pg_strdup(sep + 1);
}
/* local directory */
else if (!XLByteInSeg(private.startptr, segno))
{
fprintf(stderr,
- "%s: start log position %X/%X is not inside file \"%s\"\n",
+ "%s: start log position %X/%X is not inside file \"%s\"\n",
progname,
(uint32) (private.startptr >> 32),
(uint32) private.startptr,
(uint32) private.startptr);
/*
- * Display a message that we're skipping data if `from` wasn't a pointer to
- * the start of a record and also wasn't a pointer to the beginning of a
- * segment (e.g. we were used in file mode).
+ * Display a message that we're skipping data if `from` wasn't a pointer
+ * to the start of a record and also wasn't a pointer to the beginning of
+ * a segment (e.g. we were used in file mode).
*/
if (first_record != private.startptr && (private.startptr % XLogSegSize) != 0)
printf("first record is after %X/%X, at %X/%X, skipping over %u bytes\n",
const RmgrDescData RmgrDescTable[RM_MAX_ID + 1] = {
#include "access/rmgrlist.h"
};
-
typedef struct RmgrDescData
{
const char *rm_name;
- void (*rm_desc) (StringInfo buf, uint8 xl_info, char *rec);
+ void (*rm_desc) (StringInfo buf, uint8 xl_info, char *rec);
} RmgrDescData;
extern const RmgrDescData RmgrDescTable[];
-#endif /* RMGRDESC_H */
+#endif /* RMGRDESC_H */
bool use_log; /* log transaction latencies to a file */
bool use_quiet; /* quiet logging onto stderr */
-int agg_interval; /* log aggregates instead of individual transactions */
+int agg_interval; /* log aggregates instead of individual
+ * transactions */
bool is_connect; /* establish connection for each transaction */
bool is_latencies; /* report per-command latencies */
int main_pid; /* main process id used in log filename */
typedef struct
{
- long start_time; /* when does the interval start */
- int cnt; /* number of transactions */
- double min_duration; /* min/max durations */
- double max_duration;
- double sum; /* sum(duration), sum(duration^2) - for estimates */
- double sum2;
-
+ long start_time; /* when does the interval start */
+ int cnt; /* number of transactions */
+ double min_duration; /* min/max durations */
+ double max_duration;
+ double sum; /* sum(duration), sum(duration^2) - for
+ * estimates */
+ double sum2;
+
} AggVals;
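/*
 * Editor's sketch (not part of this patch): why AggVals tracks both
 * sum(duration) and sum(duration^2).  With cnt they suffice to report
 * an interval's mean and population standard deviation without storing
 * individual latencies:
 *
 *		mean   = sum / cnt
 *		stddev = sqrt(sum2 / cnt - mean^2)
 */
#include <math.h>

static void
interval_stats(const AggVals *agg, double *mean, double *stddev)
{
	*mean = agg->sum / agg->cnt;
	*stddev = sqrt(agg->sum2 / agg->cnt - *mean * *mean);
}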
static Command **sql_files[MAX_FILES]; /* SQL script files */
}
static
-void agg_vals_init(AggVals * aggs, instr_time start)
+void
+agg_vals_init(AggVals *aggs, instr_time start)
{
/* basic counters */
- aggs->cnt = 0; /* number of transactions */
- aggs->sum = 0; /* SUM(duration) */
- aggs->sum2 = 0; /* SUM(duration*duration) */
+ aggs->cnt = 0; /* number of transactions */
+ aggs->sum = 0; /* SUM(duration) */
+ aggs->sum2 = 0; /* SUM(duration*duration) */
/* min and max transaction duration */
aggs->min_duration = 0;
/* return false iff client should be disconnected */
static bool
-doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVals * agg)
+doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVals *agg)
{
PGresult *res;
Command **commands;
/* should we aggregate the results or not? */
if (agg_interval > 0)
{
- /* are we still in the same interval? if yes, accumulate the
- * values (print them otherwise) */
+ /*
+ * are we still in the same interval? if yes, accumulate
+ * the values (print them otherwise)
+ */
if (agg->start_time + agg_interval >= INSTR_TIME_GET_DOUBLE(now))
{
agg->cnt += 1;
- agg->sum += usec;
+ agg->sum += usec;
agg->sum2 += usec * usec;
/* first in this aggregation interval */
if ((agg->cnt == 1) || (usec < agg->min_duration))
- agg->min_duration = usec;
+ agg->min_duration = usec;
if ((agg->cnt == 1) || (usec > agg->max_duration))
agg->max_duration = usec;
}
else
{
- /* Loop until we reach the interval of the current transaction (and
- * print all the empty intervals in between). */
+ /*
+ * Loop until we reach the interval of the current
+ * transaction (and print all the empty intervals in
+ * between).
+ */
while (agg->start_time + agg_interval < INSTR_TIME_GET_DOUBLE(now))
{
- /* This is a non-Windows branch (thanks to the ifdef in usage), so
- * we don't need to handle this in a special way (see below). */
+ /*
+ * This is a non-Windows branch (thanks to the
+ * ifdef in usage), so we don't need to handle
+ * this in a special way (see below).
+ */
fprintf(logfile, "%ld %d %.0f %.0f %.0f %.0f\n",
- agg->start_time, agg->cnt, agg->sum, agg->sum2,
+ agg->start_time, agg->cnt, agg->sum, agg->sum2,
agg->min_duration, agg->max_duration);
/* move to the next interval */
agg->sum2 = 0;
}
- /* and now update the reset values (include the current) */
+ /*
+ * and now update the reset values (include the
+ * current)
+ */
agg->cnt = 1;
agg->min_duration = usec;
agg->max_duration = usec;
{
/* no, print raw transactions */
#ifndef WIN32
- /* This is more than we really ought to know about instr_time */
+
+ /*
+ * This is more than we really ought to know about
+ * instr_time
+ */
fprintf(logfile, "%d %d %.0f %d %ld %ld\n",
st->id, st->cnt, usec, st->use_file,
(long) now.tv_sec, (long) now.tv_usec);
#else
- /* On Windows, instr_time doesn't provide a timestamp anyway */
+
+ /*
+ * On Windows, instr_time doesn't provide a timestamp
+ * anyway
+ */
fprintf(logfile, "%d %d %.0f %d 0 0\n",
st->id, st->cnt, usec, st->use_file);
#endif
}
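/*
 * Editor's sketch (not part of this patch): the interval-advance loop
 * above in isolation.  Intervals with no transactions are still
 * printed (with zero counts), so the aggregated log carries exactly
 * one line per agg_interval seconds with no gaps.
 */
static void
advance_interval(AggVals *agg, double now, int agg_interval, FILE *logfile)
{
	while (agg->start_time + agg_interval < now)
	{
		/* print the finished (possibly empty) interval ... */
		fprintf(logfile, "%ld %d %.0f %.0f %.0f %.0f\n",
				agg->start_time, agg->cnt, agg->sum, agg->sum2,
				agg->min_duration, agg->max_duration);
		/* ... then slide the window and reset the accumulators */
		agg->start_time += agg_interval;
		agg->cnt = 0;
		agg->sum = agg->sum2 = 0;
		agg->min_duration = agg->max_duration = 0;
	}
}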
/*
- * getrand() needs to be able to subtract max from min and add
- * one to the result without overflowing. Since we know max > min,
- * we can detect overflow just by checking for a negative result.
- * But we must check both that the subtraction doesn't overflow,
- * and that adding one to the result doesn't overflow either.
+ * getrand() needs to be able to subtract max from min and add one
+ * to the result without overflowing. Since we know max > min, we
+ * can detect overflow just by checking for a negative result. But
+ * we must check both that the subtraction doesn't overflow, and
+ * that adding one to the result doesn't overflow either.
*/
if (max - min < 0 || (max - min) + 1 < 0)
{
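/*
 * Editor's note (not part of this patch): two worked cases for the
 * check above, with 64-bit min/max and max > min already established:
 *
 *		min = -5, max = INT64_MAX:	max - min wraps negative	-> reject
 *		min = 0,  max = INT64_MAX:	(max - min) + 1 wraps		-> reject
 *
 * Checking both the subtraction and the +1 for a negative result is
 * therefore sufficient to detect overflow in getrand()'s range math.
 */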
static void
init(bool is_no_vacuum)
{
-
/* The scale factor at/beyond which 32bit integers are incapable of storing
* 64bit values.
*
{
"pgbench_history",
scale >= SCALE_32BIT_THRESHOLD
- ? "tid int,bid int,aid bigint,delta int,mtime timestamp,filler char(22)"
- : "tid int,bid int,aid int,delta int,mtime timestamp,filler char(22)",
+ ? "tid int,bid int,aid bigint,delta int,mtime timestamp,filler char(22)"
+ : "tid int,bid int,aid int,delta int,mtime timestamp,filler char(22)",
0
},
{
{
"pgbench_accounts",
scale >= SCALE_32BIT_THRESHOLD
- ? "aid bigint not null,bid int,abalance int,filler char(84)"
- : "aid int not null,bid int,abalance int,filler char(84)",
+ ? "aid bigint not null,bid int,abalance int,filler char(84)"
+ : "aid int not null,bid int,abalance int,filler char(84)",
1
},
{
int64 k;
/* used to track elapsed time and estimate of the remaining time */
- instr_time start, diff;
- double elapsed_sec, remaining_sec;
+ instr_time start,
+ diff;
+ double elapsed_sec,
+ remaining_sec;
int log_interval = 1;
if ((con = doConnect()) == NULL)
exit(1);
}
- /* If we want to stick with the original logging, print a message each
- * 100k inserted rows. */
- if ((! use_quiet) && (j % 100000 == 0))
+ /*
+ * If we want to stick with the original logging, print a message each
+ * 100k inserted rows.
+ */
+ if ((!use_quiet) && (j % 100000 == 0))
{
INSTR_TIME_SET_CURRENT(diff);
INSTR_TIME_SUBTRACT(diff, start);
remaining_sec = (scale * naccounts - j) * elapsed_sec / j;
fprintf(stderr, INT64_FORMAT " of " INT64_FORMAT " tuples (%d%%) done (elapsed %.2f s, remaining %.2f s).\n",
- j, (int64)naccounts * scale,
- (int) (((int64) j * 100) / (naccounts * scale)),
- elapsed_sec, remaining_sec);
+ j, (int64) naccounts * scale,
+ (int) (((int64) j * 100) / (naccounts * scale)),
+ elapsed_sec, remaining_sec);
}
/* let's not call the timing for each row, but only each 100 rows */
else if (use_quiet && (j % 100 == 0))
remaining_sec = (scale * naccounts - j) * elapsed_sec / j;
/* have we reached the next interval (or end)? */
- if ((j == scale * naccounts) || (elapsed_sec >= log_interval * LOG_STEP_SECONDS)) {
+ if ((j == scale * naccounts) || (elapsed_sec >= log_interval * LOG_STEP_SECONDS))
+ {
fprintf(stderr, INT64_FORMAT " of " INT64_FORMAT " tuples (%d%%) done (elapsed %.2f s, remaining %.2f s).\n",
- j, (int64)naccounts * scale,
+ j, (int64) naccounts * scale,
(int) (((int64) j * 100) / (naccounts * scale)), elapsed_sec, remaining_sec);
/* skip to the next interval */
- log_interval = (int)ceil(elapsed_sec/LOG_STEP_SECONDS);
+ log_interval = (int) ceil(elapsed_sec / LOG_STEP_SECONDS);
}
}
exit(1);
}
- if (agg_interval > 0 && (! use_log)) {
+ if (agg_interval > 0 && (!use_log))
+ {
fprintf(stderr, "log aggregation is allowed only when actually logging transactions\n");
exit(1);
}
- if ((duration > 0) && (agg_interval > duration)) {
+ if ((duration > 0) && (agg_interval > duration))
+ {
fprintf(stderr, "number of seconds for aggregation (%d) must not be higher that test duration (%d)\n", agg_interval, duration);
exit(1);
}
- if ((duration > 0) && (agg_interval > 0) && (duration % agg_interval != 0)) {
+ if ((duration > 0) && (agg_interval > 0) && (duration % agg_interval != 0))
+ {
fprintf(stderr, "duration (%d) must be a multiple of aggregation interval (%d)\n", duration, agg_interval);
exit(1);
}
AggVals aggs;
result = pg_malloc(sizeof(TResult));
-
+
INSTR_TIME_SET_ZERO(result->conn_time);
/* open log file if requested */
INSTR_TIME_SUBTRACT(result->conn_time, thread->start_time);
agg_vals_init(&aggs, thread->start_time);
-
+
/* send start up queries in async manner */
for (i = 0; i < nstate; i++)
{
mp_result mp_int_mul_pow2(mp_int a, int p2, mp_int c);
mp_result mp_int_sqr(mp_int a, mp_int c); /* c = a * a */
-mp_result
-mp_int_div(mp_int a, mp_int b, /* q = a / b */
+mp_result mp_int_div(mp_int a, mp_int b, /* q = a / b */
mp_int q, mp_int r); /* r = a % b */
-mp_result
-mp_int_div_value(mp_int a, int value, /* q = a / value */
+mp_result mp_int_div_value(mp_int a, int value, /* q = a / value */
mp_int q, int *r); /* r = a % value */
-mp_result
-mp_int_div_pow2(mp_int a, int p2, /* q = a / 2^p2 */
+mp_result mp_int_div_pow2(mp_int a, int p2, /* q = a / 2^p2 */
mp_int q, mp_int r); /* r = q % 2^p2 */
mp_result mp_int_mod(mp_int a, mp_int m, mp_int c); /* c = a % m */
/* Returns k >= 0 such that z = 2^k, if one exists; otherwise < 0 */
int mp_int_is_pow2(mp_int z);
-mp_result
-mp_int_exptmod(mp_int a, mp_int b, mp_int m,
+mp_result mp_int_exptmod(mp_int a, mp_int b, mp_int m,
mp_int c); /* c = a^b (mod m) */
-mp_result
-mp_int_exptmod_evalue(mp_int a, int value,
+mp_result mp_int_exptmod_evalue(mp_int a, int value,
mp_int m, mp_int c); /* c = a^v (mod m) */
-mp_result
-mp_int_exptmod_bvalue(int value, mp_int b,
+mp_result mp_int_exptmod_bvalue(int value, mp_int b,
mp_int m, mp_int c); /* c = v^b (mod m) */
-mp_result
-mp_int_exptmod_known(mp_int a, mp_int b,
+mp_result mp_int_exptmod_known(mp_int a, mp_int b,
mp_int m, mp_int mu,
mp_int c); /* c = a^b (mod m) */
mp_result mp_int_redux_const(mp_int m, mp_int c);
mp_result mp_int_gcd(mp_int a, mp_int b, mp_int c); /* c = gcd(a, b) */
-mp_result
-mp_int_egcd(mp_int a, mp_int b, mp_int c, /* c = gcd(a, b) */
+mp_result mp_int_egcd(mp_int a, mp_int b, mp_int c, /* c = gcd(a, b) */
mp_int x, mp_int y); /* c = ax + by */
mp_result mp_int_sqrt(mp_int a, mp_int c); /* c = floor(sqrt(q)) */
int pgp_s2k_process(PGP_S2K *s2k, int cipher, const uint8 *key, int klen);
typedef struct PGP_CFB PGP_CFB;
-int
-pgp_cfb_create(PGP_CFB **ctx_p, int algo,
+int pgp_cfb_create(PGP_CFB **ctx_p, int algo,
const uint8 *key, int key_len, int recync, uint8 *iv);
void pgp_cfb_free(PGP_CFB *ctx);
int pgp_cfb_encrypt(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst);
/* scan the relation */
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
- HTSU_Result htsu;
+ HTSU_Result htsu;
TransactionId xmax;
uint16 infomask;
values = (char **) palloc(mydata->ncolumns * sizeof(char *));
values[Atnum_tid] = (char *) DirectFunctionCall1(tidout,
- PointerGetDatum(&tuple->t_self));
+ PointerGetDatum(&tuple->t_self));
values[Atnum_xmax] = palloc(NCHARS * sizeof(char));
snprintf(values[Atnum_xmax], NCHARS, "%d", xmax);
values[Atnum_ismulti] = pstrdup("true");
allow_old = !(infomask & HEAP_LOCK_MASK) &&
- (infomask & HEAP_XMAX_LOCK_ONLY);
+ (infomask & HEAP_XMAX_LOCK_ONLY);
nmembers = GetMultiXactIdMembers(xmax, &members, allow_old);
if (nmembers == -1)
{
result = HeapTupleGetDatum(tuple);
/*
- * no need to pfree what we allocated; it's on a short-lived memory
- * context anyway
+ * no need to pfree what we allocated; it's on a short-lived
+ * memory context anyway
*/
SRF_RETURN_NEXT(funcctx, result);
{
int32 version;
- BlockNumber pending_pages;
+ BlockNumber pending_pages;
int64 pending_tuples;
} GinIndexStat;
Relation rel;
Buffer buffer;
Page page;
- GinMetaPageData *metadata;
+ GinMetaPageData *metadata;
GinIndexStat stats;
HeapTuple tuple;
TupleDesc tupleDesc;
if (RELATION_IS_OTHER_TEMP(rel))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot access temporary indexes of other sessions")));
+ errmsg("cannot access temporary indexes of other sessions")));
/*
* Read metapage
* anyway. However, it makes the regression test outputs more predictable.
*
* We don't risk setting remote zone equal to ours, since the remote
- * server might use a different timezone database. Instead, use UTC
+ * server might use a different timezone database. Instead, use UTC
* (quoted, because very old servers are picky about case).
*/
do_sql_command(conn, "SET timezone = 'UTC'");
/* extracted fdw_private data */
char *query; /* text of SELECT command */
- List *retrieved_attrs; /* list of retrieved attribute numbers */
+ List *retrieved_attrs; /* list of retrieved attribute numbers */
/* for remote query execution */
PGconn *conn; /* connection for the scan */
char *query; /* text of INSERT/UPDATE/DELETE command */
List *target_attrs; /* list of target attribute numbers */
bool has_returning; /* is there a RETURNING clause? */
- List *retrieved_attrs; /* attr numbers retrieved by RETURNING */
+ List *retrieved_attrs; /* attr numbers retrieved by RETURNING */
/* info about parameters for prepared statement */
AttrNumber ctidAttno; /* attnum of input resjunk ctid column */
{
Relation rel; /* relcache entry for the foreign table */
AttInMetadata *attinmeta; /* attribute datatype conversion metadata */
- List *retrieved_attrs; /* attr numbers retrieved by query */
+ List *retrieved_attrs; /* attr numbers retrieved by query */
/* collected sample rows */
HeapTuple *rows; /* array of size targrows */
/*
* If the table or the server is configured to use remote estimates,
- * identify which user to do remote access as during planning. This
- * should match what ExecCheckRTEPerms() does. If we fail due to lack of
+ * identify which user to do remote access as during planning. This
+ * should match what ExecCheckRTEPerms() does. If we fail due to lack of
* permissions, the query would have failed at runtime anyway.
*/
if (fpinfo->use_remote_estimate)
/*
* Identify which attributes will need to be retrieved from the remote
- * server. These include all attrs needed for joins or final output, plus
+ * server. These include all attrs needed for joins or final output, plus
* all attrs used in the local_conds. (Note: if we end up using a
* parameterized scan, it's possible that some of the join clauses will be
* sent to the remote and thus we wouldn't really need to retrieve the
fsstate->query = strVal(list_nth(fsplan->fdw_private,
FdwScanPrivateSelectSql));
fsstate->retrieved_attrs = (List *) list_nth(fsplan->fdw_private,
- FdwScanPrivateRetrievedAttrs);
+ FdwScanPrivateRetrievedAttrs);
/* Create contexts for batches of tuples and per-tuple temp workspace. */
fsstate->batch_cxt = AllocSetContextCreate(estate->es_query_cxt,
fmstate->has_returning = intVal(list_nth(fdw_private,
FdwModifyPrivateHasReturning));
fmstate->retrieved_attrs = (List *) list_nth(fdw_private,
- FdwModifyPrivateRetrievedAttrs);
+ FdwModifyPrivateRetrievedAttrs);
/* Create context for per-tuple temp workspace. */
fmstate->temp_cxt = AllocSetContextCreate(estate->es_query_cxt,
* Notice that we pass NULL for paramTypes, thus forcing the remote server
* to infer types for all parameters. Since we explicitly cast every
* parameter (see deparse.c), the "inference" is trivial and will produce
- * the desired result. This allows us to avoid assuming that the remote
+ * the desired result. This allows us to avoid assuming that the remote
* server has the same OIDs we do for the parameters' types.
*
* We don't use a PG_TRY block here, so be careful not to throw error
astate->rows[pos] = make_tuple_from_result_row(res, row,
astate->rel,
astate->attinmeta,
- astate->retrieved_attrs,
+ astate->retrieved_attrs,
astate->temp_cxt);
MemoryContextSwitchTo(oldcontext);
List **retrieved_attrs);
extern void deparseAnalyzeSizeSql(StringInfo buf, Relation rel);
extern void deparseAnalyzeSql(StringInfo buf, Relation rel,
- List **retrieved_attrs);
+ List **retrieved_attrs);
#endif /* POSTGRES_FDW_H */
case OAT_POST_CREATE:
{
ObjectAccessPostCreate *pc_arg = arg;
- bool is_internal;
+ bool is_internal;
is_internal = pc_arg ? pc_arg->is_internal : false;
case DatabaseRelationId:
Assert(!is_internal);
sepgsql_database_post_create(objectId,
- sepgsql_context_info.createdb_dtemplate);
+ sepgsql_context_info.createdb_dtemplate);
break;
case NamespaceRelationId:
case OAT_POST_ALTER:
{
- ObjectAccessPostAlter *pa_arg = arg;
- bool is_internal = pa_arg->is_internal;
+ ObjectAccessPostAlter *pa_arg = arg;
+ bool is_internal = pa_arg->is_internal;
switch (classId)
{
case RelationRelationId:
if (subId == 0)
- {
+ {
/*
* A case when we don't want to apply permission
* check is that relation is internally altered
- * without user's intention. E.g, no need to
- * check on toast table/index to be renamed at
- * end of the table rewrites.
+ * without user's intention. E.g., no need to check
+ * on toast table/index to be renamed at end of
+ * the table rewrites.
*/
if (is_internal)
- break;
+ break;
sepgsql_relation_setattr(objectId);
- }
- else
- sepgsql_attribute_setattr(objectId, subId);
+ }
+ else
+ sepgsql_attribute_setattr(objectId, subId);
break;
case ProcedureRelationId:
case OAT_NAMESPACE_SEARCH:
{
- ObjectAccessNamespaceSearch *ns_arg = arg;
+ ObjectAccessNamespaceSearch *ns_arg = arg;
/*
- * If stacked extension already decided not to allow users
- * to search this schema, we just stick with that decision.
+ * If a stacked extension already decided not to allow users to
+ * search this schema, we just stick with that decision.
*/
if (!ns_arg->result)
break;
*
* Also, db_procedure:entrypoint permission should be checked to see
* whether this procedure can perform as an entrypoint of the
- * trusted procedure, or not.
- * Note that db_procedure:execute permission shall be checked
- * individually.
+ * trusted procedure, or not. Note that db_procedure:execute
+ * permission shall be checked individually.
*/
if (stack->new_label)
{
void
sepgsql_proc_setattr(Oid functionId)
{
- Relation rel;
- ScanKeyData skey;
- SysScanDesc sscan;
- HeapTuple oldtup;
- HeapTuple newtup;
- Form_pg_proc oldform;
- Form_pg_proc newform;
- uint32 required;
- ObjectAddress object;
- char *audit_name;
+ Relation rel;
+ ScanKeyData skey;
+ SysScanDesc sscan;
+ HeapTuple oldtup;
+ HeapTuple newtup;
+ Form_pg_proc oldform;
+ Form_pg_proc newform;
+ uint32 required;
+ ObjectAddress object;
+ char *audit_name;
/*
* Fetch newer catalog
sepgsql_avc_check_perms(&object,
SEPG_CLASS_DB_PROCEDURE,
- required,
+ required,
audit_name,
true);
/* cleanups */
#include "sepgsql.h"
-static void sepgsql_index_modify(Oid indexOid);
+static void sepgsql_index_modify(Oid indexOid);
/*
* sepgsql_attribute_post_create
void
sepgsql_relation_setattr(Oid relOid)
{
- Relation rel;
- ScanKeyData skey;
- SysScanDesc sscan;
- HeapTuple oldtup;
- HeapTuple newtup;
- Form_pg_class oldform;
- Form_pg_class newform;
+ Relation rel;
+ ScanKeyData skey;
+ SysScanDesc sscan;
+ HeapTuple oldtup;
+ HeapTuple newtup;
+ Form_pg_class oldform;
+ Form_pg_class newform;
ObjectAddress object;
char *audit_name;
uint16_t tclass;
AttrNumber anum_relation_id,
AttrNumber anum_extra_id)
{
- ScanKeyData skey;
- SysScanDesc sscan;
+ ScanKeyData skey;
+ SysScanDesc sscan;
HeapTuple tuple;
Datum datum;
bool isnull;
/*
* sepgsql_index_modify
- * Handle index create, update, drop
+ * Handle index create, update, drop
*
* Unlike other relation kinds, indexes do not have their own security labels,
* so instead of doing checks directly, treat them as extra attributes of their
tcontext,
SEPG_CLASS_DB_SCHEMA,
nsp_name);
+
/*
* check db_schema:{create}
*/
static TrieChar *
placeChar(TrieChar *node, unsigned char *str, int lenstr, char *replaceTo, int replacelen)
{
- TrieChar *curnode;
+ TrieChar *curnode;
if (!node)
{
static TrieChar *
initTrie(char *filename)
{
- TrieChar *volatile rootTrie = NULL;
+ TrieChar *volatile rootTrie = NULL;
MemoryContext ccxt = CurrentMemoryContext;
tsearch_readline_state trst;
volatile bool skip;
if (state >= 3)
rootTrie = placeChar(rootTrie,
- (unsigned char *) src, srclen,
- trg, trglen);
+ (unsigned char *) src, srclen,
+ trg, trglen);
pfree(line);
}
*/
do
{
-#define PARAMS_ARRAY_SIZE 7
+#define PARAMS_ARRAY_SIZE 7
const char *keywords[PARAMS_ARRAY_SIZE];
const char *values[PARAMS_ARRAY_SIZE];
PG_MODULE_MAGIC;
-void _PG_init(void);
+void _PG_init(void);
/* flags set by signal handlers */
static volatile sig_atomic_t got_sighup = false;
static volatile sig_atomic_t got_sigterm = false;
/* GUC variables */
-static int worker_spi_naptime = 10;
-static int worker_spi_total_workers = 2;
+static int worker_spi_naptime = 10;
+static int worker_spi_total_workers = 2;
typedef struct worktable
{
- const char *schema;
- const char *name;
+ const char *schema;
+ const char *name;
} worktable;
/*
* Signal handler for SIGTERM
- * Set a flag to let the main loop to terminate, and set our latch to wake
- * it up.
+ * Set a flag to let the main loop terminate, and set our latch to wake
+ * it up.
*/
static void
worker_spi_sigterm(SIGNAL_ARGS)
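The handler body falls outside this hunk; a minimal sketch of the latch-based pattern the comment describes (it assumes the standard MyProc/SetLatch machinery, and saves/restores errno for signal safety):

{
	int			save_errno = errno;

	got_sigterm = true;
	if (MyProc)
		SetLatch(&MyProc->procLatch);

	errno = save_errno;
}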
/*
* Signal handler for SIGHUP
- * Set a flag to let the main loop to reread the config file, and set
- * our latch to wake it up.
+ * Set a flag to let the main loop reread the config file, and set
+ * our latch to wake it up.
*/
static void
worker_spi_sighup(SIGNAL_ARGS)
static void
initialize_worker_spi(worktable *table)
{
- int ret;
- int ntup;
- bool isnull;
- StringInfoData buf;
+ int ret;
+ int ntup;
+ bool isnull;
+ StringInfoData buf;
SetCurrentStatementStartTimestamp();
StartTransactionCommand();
appendStringInfo(&buf,
"CREATE SCHEMA \"%s\" "
"CREATE TABLE \"%s\" ("
- " type text CHECK (type IN ('total', 'delta')), "
+ " type text CHECK (type IN ('total', 'delta')), "
" value integer)"
- "CREATE UNIQUE INDEX \"%s_unique_total\" ON \"%s\" (type) "
+ "CREATE UNIQUE INDEX \"%s_unique_total\" ON \"%s\" (type) "
"WHERE type = 'total'",
- table->schema, table->name, table->name, table->name);
+ table->schema, table->name, table->name, table->name);
/* set statement start time */
SetCurrentStatementStartTimestamp();
static void
worker_spi_main(void *main_arg)
{
- worktable *table = (worktable *) main_arg;
- StringInfoData buf;
+ worktable *table = (worktable *) main_arg;
+ StringInfoData buf;
/* We're now ready to receive signals */
BackgroundWorkerUnblockSignals();
initialize_worker_spi(table);
/*
- * Quote identifiers passed to us. Note that this must be done after
+ * Quote identifiers passed to us. Note that this must be done after
* initialize_worker_spi, because that routine assumes the names are not
* quoted.
*
*/
while (!got_sigterm)
{
- int ret;
- int rc;
+ int ret;
+ int rc;
/*
* Background workers mustn't call usleep() or any direct equivalent:
/*
* In case of a SIGHUP, just reload the configuration.
*/
- if (got_sighup)
- {
- got_sighup = false;
- ProcessConfigFile(PGC_SIGHUP);
- }
+ if (got_sighup)
+ {
+ got_sighup = false;
+ ProcessConfigFile(PGC_SIGHUP);
+ }
/*
* Start a transaction on which we can run queries. Note that each
* StartTransactionCommand() call should be preceded by a
* SetCurrentStatementStartTimestamp() call, which sets both the time
* for the statement we're about to run, and also the transaction
- * start time. Also, each other query sent to SPI should probably be
+ * start time. Also, each other query sent to SPI should probably be
* preceded by SetCurrentStatementStartTimestamp(), so that statement
* start time is always up to date.
*
* The SPI_connect() call lets us run queries through the SPI manager,
- * and the PushActiveSnapshot() call creates an "active" snapshot which
- * is necessary for queries to have MVCC data to work on.
+ * and the PushActiveSnapshot() call creates an "active" snapshot
+ * which is necessary for queries to have MVCC data to work on.
*
- * The pgstat_report_activity() call makes our activity visible through
- * the pgstat views.
+ * The pgstat_report_activity() call makes our activity visible
+ * through the pgstat views.
*/
SetCurrentStatementStartTimestamp();
StartTransactionCommand();
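For reference, the full per-iteration sequence the comment above prescribes looks roughly like this sketch (error handling and the actual query text are elided by the hunk; SPI_OK_SELECT stands in for whichever result code the real query expects):

SPI_connect();
PushActiveSnapshot(GetTransactionSnapshot());
pgstat_report_activity(STATE_RUNNING, buf.data);

ret = SPI_execute(buf.data, false, 0);
if (ret != SPI_OK_SELECT)
	elog(FATAL, "SPI_execute failed on %s.%s: error code %d",
		 table->schema, table->name, ret);

/* ... examine SPI_processed / SPI_tuptable here ... */

SPI_finish();
PopActiveSnapshot();
CommitTransactionCommand();
pgstat_report_activity(STATE_IDLE, NULL);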
if (SPI_processed > 0)
{
- bool isnull;
- int32 val;
+ bool isnull;
+ int32 val;
val = DatumGetInt32(SPI_getbinval(SPI_tuptable->vals[0],
- SPI_tuptable->tupdesc,
- 1, &isnull));
+ SPI_tuptable->tupdesc,
+ 1, &isnull));
if (!isnull)
elog(LOG, "%s: count in %s.%s is now %d",
MyBgworkerEntry->bgw_name,
void
_PG_init(void)
{
- BackgroundWorker worker;
- worktable *table;
- unsigned int i;
- char name[20];
+ BackgroundWorker worker;
+ worktable *table;
+ unsigned int i;
+ char name[20];
/* get the configuration */
DefineCustomIntVariable("worker_spi.naptime",
- "Duration between each check (in seconds).",
- NULL,
- &worker_spi_naptime,
- 10,
- 1,
- INT_MAX,
- PGC_SIGHUP,
- 0,
- NULL,
- NULL,
- NULL);
+ "Duration between each check (in seconds).",
+ NULL,
+ &worker_spi_naptime,
+ 10,
+ 1,
+ INT_MAX,
+ PGC_SIGHUP,
+ 0,
+ NULL,
+ NULL,
+ NULL);
DefineCustomIntVariable("worker_spi.total_workers",
- "Number of workers.",
- NULL,
- &worker_spi_total_workers,
- 2,
- 1,
- 100,
- PGC_POSTMASTER,
- 0,
- NULL,
- NULL,
- NULL);
+ "Number of workers.",
+ NULL,
+ &worker_spi_total_workers,
+ 2,
+ 1,
+ 100,
+ PGC_POSTMASTER,
+ 0,
+ NULL,
+ NULL,
+ NULL);
/* set up common data for all our workers */
worker.bgw_flags = BGWORKER_SHMEM_ACCESS |
ginFindParents(GinBtree btree, GinBtreeStack *stack,
BlockNumber rootBlkno)
{
-
Page page;
Buffer buffer;
BlockNumber blkno,
newtup = gistgetadjusted(indexrel, idxtuple, itup, giststate);
if (newtup)
{
- blkno = gistbufferinginserttuples(buildstate, buffer, level,
- &newtup, 1, childoffnum,
- InvalidBlockNumber, InvalidOffsetNumber);
+ blkno = gistbufferinginserttuples(buildstate, buffer, level,
+ &newtup, 1, childoffnum,
+ InvalidBlockNumber, InvalidOffsetNumber);
/* gistbufferinginserttuples() released the buffer */
}
else
GISTBuildBuffers *gfbb = buildstate->gfbb;
List *splitinfo;
bool is_split;
- BlockNumber placed_to_blk = InvalidBlockNumber;
+ BlockNumber placed_to_blk = InvalidBlockNumber;
is_split = gistplacetopage(buildstate->indexrel,
buildstate->freespace,
item->blkno = ItemPointerGetBlockNumber(&it->t_tid);
/*
- * LSN of current page is lsn of parent page for child. We only
- * have a shared lock, so we need to get the LSN atomically.
+ * LSN of current page is lsn of parent page for child. We
+ * only have a shared lock, so we need to get the LSN
+ * atomically.
*/
item->data.parentlsn = BufferGetLSNAtomic(buffer);
}
* some inserts to go to other equally-good subtrees.
*
* keep_current_best is -1 if we haven't yet had to make a random choice
- * whether to keep the current best tuple. If we have done so, and
+ * whether to keep the current best tuple. If we have done so, and
* decided to keep it, keep_current_best is 1; if we've decided to
* replace, keep_current_best is 0. (This state will be reset to -1 as
* soon as we've made the replacement, but sometimes we make the choice in
if (rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP)
{
/*
- * Temporary relations are only accessible in our session, so a
- * simple backend-local counter will do.
+ * Temporary relations are only accessible in our session, so a simple
+ * backend-local counter will do.
*/
return counter++;
}
* follow-right flag, because that change is not included in the full-page
* image. To be sure that the intermediate state with the wrong flag value is
* not visible to concurrent Hot Standby queries, this function handles
- * restoring the full-page image as well as updating the flag. (Note that
+ * restoring the full-page image as well as updating the flag. (Note that
* we never need to do anything else to the child page in the current WAL
* action.)
*/
/*
* We need to acquire and hold lock on target page while updating the left
- * child page. If we have a full-page image of target page, getting the
+ * child page. If we have a full-page image of target page, getting the
* lock is a side-effect of restoring that image. Note that even if the
* target page no longer exists, we'll still attempt to replay the change
* on the child page.
/*
* If the previous iteration of this loop locked what is still the
- * correct target bucket, we are done. Otherwise, drop any old lock
+ * correct target bucket, we are done. Otherwise, drop any old lock
* and lock what now appears to be the correct bucket.
*/
if (retry)
/*
* If the previous iteration of this loop locked what is still the
- * correct target bucket, we are done. Otherwise, drop any old lock
+ * correct target bucket, we are done. Otherwise, drop any old lock
* and lock what now appears to be the correct bucket.
*/
if (retry)
static const struct
{
LOCKMODE hwlock;
- MultiXactStatus lockstatus;
- MultiXactStatus updstatus;
+ MultiXactStatus lockstatus;
+ MultiXactStatus updstatus;
}
-tupleLockExtraInfo[MaxLockTupleMode + 1] =
+
+ tupleLockExtraInfo[MaxLockTupleMode + 1] =
{
- { /* LockTupleKeyShare */
+ { /* LockTupleKeyShare */
AccessShareLock,
MultiXactStatusForKeyShare,
- -1 /* KeyShare does not allow updating tuples */
+ -1 /* KeyShare does not allow updating tuples */
},
- { /* LockTupleShare */
+ { /* LockTupleShare */
RowShareLock,
MultiXactStatusForShare,
- -1 /* Share does not allow updating tuples */
+ -1 /* Share does not allow updating tuples */
},
- { /* LockTupleNoKeyExclusive */
+ { /* LockTupleNoKeyExclusive */
ExclusiveLock,
MultiXactStatusForNoKeyUpdate,
MultiXactStatusNoKeyUpdate
},
- { /* LockTupleExclusive */
+ { /* LockTupleExclusive */
AccessExclusiveLock,
MultiXactStatusForUpdate,
MultiXactStatusUpdate
}
};
+
/* Get the LOCKMODE for a given MultiXactStatus */
#define LOCKMODE_from_mxstatus(status) \
(tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)
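Composing the two lookup tables yields the heavyweight lock for any status; a worked example (TUPLOCK_from_mxstatus is defined just below):

/*
 * TUPLOCK_from_mxstatus(MultiXactStatusForShare) == LockTupleShare, so
 * LOCKMODE_from_mxstatus(MultiXactStatusForShare)
 *     == tupleLockExtraInfo[LockTupleShare].hwlock == RowShareLock.
 */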
*/
static const int MultiXactStatusLock[MaxMultiXactStatus + 1] =
{
- LockTupleKeyShare, /* ForKeyShare */
- LockTupleShare, /* ForShare */
- LockTupleNoKeyExclusive, /* ForNoKeyUpdate */
- LockTupleExclusive, /* ForUpdate */
- LockTupleNoKeyExclusive, /* NoKeyUpdate */
- LockTupleExclusive /* Update */
+ LockTupleKeyShare, /* ForKeyShare */
+ LockTupleShare, /* ForShare */
+ LockTupleNoKeyExclusive, /* ForNoKeyUpdate */
+ LockTupleExclusive, /* ForUpdate */
+ LockTupleNoKeyExclusive, /* NoKeyUpdate */
+ LockTupleExclusive /* Update */
};
/* Get the LockTupleMode for a given MultiXactStatus */
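The macro itself lies outside this hunk; per the comment it is presumably just an array lookup, along these lines:

#define TUPLOCK_from_mxstatus(status) \
	(MultiXactStatusLock[(status)])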
* page. That's how index-only scans work fine in hot standby. A crucial
* difference between index-only scans and heap scans is that the
* index-only scan completely relies on the visibility map whereas heap
- * scan looks at the page-level PD_ALL_VISIBLE flag. We are not sure if the
- * page-level flag can be trusted in the same way, because it might get
- * propagated somehow without being explicitly WAL-logged, e.g. via a full
- * page write. Until we can prove that beyond doubt, let's check each
+ * scan looks at the page-level PD_ALL_VISIBLE flag. We are not sure if
+ * the page-level flag can be trusted in the same way, because it might
+ * get propagated somehow without being explicitly WAL-logged, e.g. via a
+ * full page write. Until we can prove that beyond doubt, let's check each
* tuple for visibility the hard way.
*/
all_visible = PageIsAllVisible(dp) && !snapshot->takenDuringRecovery;
* tuple. Check for XMIN match.
*/
if (TransactionIdIsValid(priorXmax) &&
- !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
+ !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
{
UnlockReleaseBuffer(buffer);
break;
((infomask & HEAP_XMAX_IS_MULTI) != 0 ? XLHL_XMAX_IS_MULTI : 0) |
((infomask & HEAP_XMAX_LOCK_ONLY) != 0 ? XLHL_XMAX_LOCK_ONLY : 0) |
((infomask & HEAP_XMAX_EXCL_LOCK) != 0 ? XLHL_XMAX_EXCL_LOCK : 0) |
- /* note we ignore HEAP_XMAX_SHR_LOCK here */
+ /* note we ignore HEAP_XMAX_SHR_LOCK here */
((infomask & HEAP_XMAX_KEYSHR_LOCK) != 0 ? XLHL_XMAX_KEYSHR_LOCK : 0) |
((infomask2 & HEAP_KEYS_UPDATED) != 0 ?
XLHL_KEYS_UPDATED : 0);
}
/*
- * If this is the first possibly-multixact-able operation in the
- * current transaction, set my per-backend OldestMemberMXactId setting.
- * We can be certain that the transaction will never become a member of
- * any older MultiXactIds than that. (We have to do this even if we
- * end up just using our own TransactionId below, since some other
- * backend could incorporate our XID into a MultiXact immediately
- * afterwards.)
+ * If this is the first possibly-multixact-able operation in the current
+ * transaction, set my per-backend OldestMemberMXactId setting. We can be
+ * certain that the transaction will never become a member of any older
+ * MultiXactIds than that. (We have to do this even if we end up just
+ * using our own TransactionId below, since some other backend could
+ * incorporate our XID into a MultiXact immediately afterwards.)
*/
MultiXactIdSetOldestMember();
result = heap_delete(relation, tid,
GetCurrentCommandId(true), InvalidSnapshot,
- true /* wait for commit */,
+ true /* wait for commit */ ,
&hufd);
switch (result)
{
bool checked_lockers;
bool locker_remains;
TransactionId xmax_new_tuple,
- xmax_old_tuple;
+ xmax_old_tuple;
uint16 infomask_old_tuple,
infomask2_old_tuple,
infomask_new_tuple,
/*
* If we're not updating any "key" column, we can grab a weaker lock type.
- * This allows for more concurrency when we are running simultaneously with
- * foreign key checks.
+ * This allows for more concurrency when we are running simultaneously
+ * with foreign key checks.
*
- * Note that if a column gets detoasted while executing the update, but the
- * value ends up being the same, this test will fail and we will use the
- * stronger lock. This is acceptable; the important case to optimize is
- * updates that don't manipulate key columns, not those that
+ * Note that if a column gets detoasted while executing the update, but
+ * the value ends up being the same, this test will fail and we will use
+ * the stronger lock. This is acceptable; the important case to optimize
+ * is updates that don't manipulate key columns, not those that
* serendipitously arrive at the same key values.
*/
HeapSatisfiesHOTandKeyUpdate(relation, hot_attrs, key_attrs,
/*
* If this is the first possibly-multixact-able operation in the
- * current transaction, set my per-backend OldestMemberMXactId setting.
- * We can be certain that the transaction will never become a member of
- * any older MultiXactIds than that. (We have to do this even if we
- * end up just using our own TransactionId below, since some other
- * backend could incorporate our XID into a MultiXact immediately
- * afterwards.)
+ * current transaction, set my per-backend OldestMemberMXactId
+ * setting. We can be certain that the transaction will never become a
+ * member of any older MultiXactIds than that. (We have to do this
+ * even if we end up just using our own TransactionId below, since
+ * some other backend could incorporate our XID into a MultiXact
+ * immediately afterwards.)
*/
MultiXactIdSetOldestMember();
}
}
else if (result == HeapTupleBeingUpdated && wait)
{
- TransactionId xwait;
+ TransactionId xwait;
uint16 infomask;
bool can_continue = false;
/*
* XXX note that we don't consider the "no wait" case here. This
* isn't a problem currently because no caller uses that case, but it
- * should be fixed if such a caller is introduced. It wasn't a problem
- * previously because this code would always wait, but now that some
- * tuple locks do not conflict with one of the lock modes we use, it is
- * possible that this case is interesting to handle specially.
+ * should be fixed if such a caller is introduced. It wasn't a
+ * problem previously because this code would always wait, but now
+ * that some tuple locks do not conflict with one of the lock modes we
+ * use, it is possible that this case is interesting to handle
+ * specially.
*
- * This may cause failures with third-party code that calls heap_update
- * directly.
+ * This may cause failures with third-party code that calls
+ * heap_update directly.
*/
/* must copy state data before unlocking buffer */
* gone (or even not sleep at all in some cases); we need to preserve
* it as locker, unless it is gone completely.
*
- * If it's not a multi, we need to check for sleeping conditions before
- * actually going to sleep. If the update doesn't conflict with the
- * locks, we just continue without sleeping (but making sure it is
- * preserved).
+ * If it's not a multi, we need to check for sleeping conditions
+ * before actually going to sleep. If the update doesn't conflict
+ * with the locks, we just continue without sleeping (but making sure
+ * it is preserved).
*/
if (infomask & HEAP_XMAX_IS_MULTI)
{
- TransactionId update_xact;
- int remain;
+ TransactionId update_xact;
+ int remain;
/* wait for multixact */
MultiXactIdWait((MultiXactId) xwait, mxact_status, &remain,
goto l2;
/*
- * Note that the multixact may not be done by now. It could have
+ * Note that the multixact may not be done by now. It could have
* surviving members; our own xact or other subxacts of this
* backend, and also any other concurrent transaction that locked
- * the tuple with KeyShare if we only got TupleLockUpdate. If this
- * is the case, we have to be careful to mark the updated tuple
- * with the surviving members in Xmax.
+ * the tuple with KeyShare if we only got TupleLockUpdate. If
+ * this is the case, we have to be careful to mark the updated
+ * tuple with the surviving members in Xmax.
*
- * Note that there could have been another update in the MultiXact.
- * In that case, we need to check whether it committed or aborted.
- * If it aborted we are safe to update it again; otherwise there is
- * an update conflict, and we have to return HeapTupleUpdated
- * below.
+ * Note that there could have been another update in the
+ * MultiXact. In that case, we need to check whether it committed
+ * or aborted. If it aborted we are safe to update it again;
+ * otherwise there is an update conflict, and we have to return
+ * HeapTupleUpdated below.
*
* In the LockTupleExclusive case, we still need to preserve the
* surviving members: those would include the tuple locks we had
else
{
/*
- * If it's just a key-share locker, and we're not changing the
- * key columns, we don't need to wait for it to end; but we
- * need to preserve it as locker.
+ * If it's just a key-share locker, and we're not changing the key
+ * columns, we don't need to wait for it to end; but we need to
+ * preserve it as locker.
*/
if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) && key_intact)
{
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
/*
- * recheck the locker; if someone else changed the tuple while we
- * weren't looking, start over.
+ * recheck the locker; if someone else changed the tuple while
+ * we weren't looking, start over.
*/
if ((oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
- !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
- xwait))
+ !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
+ xwait))
goto l2;
can_continue = true;
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
/*
- * xwait is done, but if xwait had just locked the tuple then some
- * other xact could update this tuple before we get to this point.
- * Check for xmax change, and start over if so.
+ * xwait is done, but if xwait had just locked the tuple then
+ * some other xact could update this tuple before we get to
+ * this point. Check for xmax change, and start over if so.
*/
if ((oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
- !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
- xwait))
+ !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
+ xwait))
goto l2;
/* Otherwise check if it committed or aborted */
* visible while we were busy locking the buffer, or during some
* subsequent window during which we had it unlocked, we'll have to unlock
* and re-lock, to avoid holding the buffer lock across an I/O. That's a
- * bit unfortunate, especially since we'll now have to recheck whether
- * the tuple has been locked or updated under us, but hopefully it won't
+ * bit unfortunate, especially since we'll now have to recheck whether the
+ * tuple has been locked or updated under us, but hopefully it won't
* happen very often.
*/
if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
/*
* Extract the corresponding values. XXX this is pretty inefficient if
- * there are many indexed columns. Should HeapSatisfiesHOTandKeyUpdate do a
- * single heap_deform_tuple call on each tuple, instead? But that doesn't
- * work for system columns ...
+ * there are many indexed columns. Should HeapSatisfiesHOTandKeyUpdate do
+ * a single heap_deform_tuple call on each tuple, instead? But that
+ * doesn't work for system columns ...
*/
value1 = heap_getattr(tup1, attrnum, tupdesc, &isnull1);
value2 = heap_getattr(tup2, attrnum, tupdesc, &isnull2);
bool *satisfies_hot, bool *satisfies_key,
HeapTuple oldtup, HeapTuple newtup)
{
- int next_hot_attnum;
- int next_key_attnum;
- bool hot_result = true;
- bool key_result = true;
- bool key_done = false;
- bool hot_done = false;
+ int next_hot_attnum;
+ int next_key_attnum;
+ bool hot_result = true;
+ bool key_result = true;
+ bool key_done = false;
+ bool hot_done = false;
next_hot_attnum = bms_first_member(hot_attrs);
if (next_hot_attnum == -1)
for (;;)
{
- int check_now;
- bool changed;
+ int check_now;
+ bool changed;
/* both bitmapsets are now empty */
if (key_done && hot_done)
result = heap_update(relation, otid, tup,
GetCurrentCommandId(true), InvalidSnapshot,
- true /* wait for commit */,
+ true /* wait for commit */ ,
&hufd, &lockmode);
switch (result)
{
static MultiXactStatus
get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
{
- MultiXactStatus retval;
+ MultiXactStatus retval;
if (is_update)
retval = tupleLockExtraInfo[mode].updstatus;
uint16 infomask;
uint16 infomask2;
bool require_sleep;
- ItemPointerData t_ctid;
+ ItemPointerData t_ctid;
/* must copy state data before unlocking buffer */
xwait = HeapTupleHeaderGetRawXmax(tuple->t_data);
LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
/*
- * If any subtransaction of the current top transaction already holds a
- * lock as strong or stronger than what we're requesting, we
+ * If any subtransaction of the current top transaction already holds
+ * a lock as strong or stronger than what we're requesting, we
* effectively hold the desired lock already. We *must* succeed
- * without trying to take the tuple lock, else we will deadlock against
- * anyone wanting to acquire a stronger lock.
+ * without trying to take the tuple lock, else we will deadlock
+ * against anyone wanting to acquire a stronger lock.
*/
if (infomask & HEAP_XMAX_IS_MULTI)
{
- int i;
- int nmembers;
+ int i;
+ int nmembers;
MultiXactMember *members;
/*
- * We don't need to allow old multixacts here; if that had been the
- * case, HeapTupleSatisfiesUpdate would have returned MayBeUpdated
- * and we wouldn't be here.
+ * We don't need to allow old multixacts here; if that had been
+ * the case, HeapTupleSatisfiesUpdate would have returned
+ * MayBeUpdated and we wouldn't be here.
*/
nmembers = GetMultiXactIdMembers(xwait, &members, false);
{
if (TransactionIdIsCurrentTransactionId(members[i].xid))
{
- LockTupleMode membermode;
+ LockTupleMode membermode;
membermode = TUPLOCK_from_mxstatus(members[i].status);
if (!ConditionalLockTupleTuplock(relation, tid, mode))
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
- errmsg("could not obtain lock on row in relation \"%s\"",
- RelationGetRelationName(relation))));
+ errmsg("could not obtain lock on row in relation \"%s\"",
+ RelationGetRelationName(relation))));
}
else
LockTupleTuplock(relation, tid, mode);
* continue if the key hasn't been modified.
*
* However, if there are updates, we need to walk the update chain
- * to mark future versions of the row as locked, too. That way, if
- * somebody deletes that future version, we're protected against
- * the key going away. This locking of future versions could block
- * momentarily, if a concurrent transaction is deleting a key; or
- * it could return a value to the effect that the transaction
- * deleting the key has already committed. So we do this before
- * re-locking the buffer; otherwise this would be prone to
- * deadlocks.
+ * to mark future versions of the row as locked, too. That way,
+ * if somebody deletes that future version, we're protected
+ * against the key going away. This locking of future versions
+ * could block momentarily, if a concurrent transaction is
+ * deleting a key; or it could return a value to the effect that
+ * the transaction deleting the key has already committed. So we
+ * do this before re-locking the buffer; otherwise this would be
+ * prone to deadlocks.
*
* Note that the TID we're locking was grabbed before we unlocked
- * the buffer. For it to change while we're not looking, the other
- * properties we're testing for below after re-locking the buffer
- * would also change, in which case we would restart this loop
- * above.
+ * the buffer. For it to change while we're not looking, the
+ * other properties we're testing for below after re-locking the
+ * buffer would also change, in which case we would restart this
+ * loop above.
*/
if (!(infomask2 & HEAP_KEYS_UPDATED))
{
- bool updated;
+ bool updated;
updated = !HEAP_XMAX_IS_LOCKED_ONLY(infomask);
/*
- * If there are updates, follow the update chain; bail out
- * if that cannot be done.
+ * If there are updates, follow the update chain; bail out if
+ * that cannot be done.
*/
if (follow_updates && updated)
{
- HTSU_Result res;
+ HTSU_Result res;
res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
GetCurrentTransactionId(),
/*
* Make sure it's still an appropriate lock, else start over.
* Also, if it wasn't updated before we released the lock, but
- * is updated now, we start over too; the reason is that we now
- * need to follow the update chain to lock the new versions.
+ * is updated now, we start over too; the reason is that we
+ * now need to follow the update chain to lock the new
+ * versions.
*/
if (!HeapTupleHeaderIsOnlyLocked(tuple->t_data) &&
((tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED) ||
{
/*
* If we're requesting NoKeyExclusive, we might also be able to
- * avoid sleeping; just ensure that there's no other lock type than
- * KeyShare. Note that this is a bit more involved than just
+ * avoid sleeping; just ensure that there's no other lock type
+ * than KeyShare. Note that this is a bit more involved than just
* checking hint bits -- we need to expand the multixact to figure
* out lock modes for each one (unless there was only one such
* locker).
*/
if (infomask & HEAP_XMAX_IS_MULTI)
{
- int nmembers;
+ int nmembers;
MultiXactMember *members;
/*
- * We don't need to allow old multixacts here; if that had been
- * the case, HeapTupleSatisfiesUpdate would have returned
+ * We don't need to allow old multixacts here; if that had
+ * been the case, HeapTupleSatisfiesUpdate would have returned
* MayBeUpdated and we wouldn't be here.
*/
nmembers = GetMultiXactIdMembers(xwait, &members, false);
if (nmembers <= 0)
{
/*
- * No need to keep the previous xmax here. This is unlikely
- * to happen.
+ * No need to keep the previous xmax here. This is
+ * unlikely to happen.
*/
require_sleep = false;
}
else
{
- int i;
- bool allowed = true;
+ int i;
+ bool allowed = true;
for (i = 0; i < nmembers; i++)
{
/* if the xmax changed in the meantime, start over */
if ((tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
- !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
- xwait))
+ !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
+ xwait))
goto l3;
/* otherwise, we're good */
require_sleep = false;
if (follow_updates &&
!HEAP_XMAX_IS_LOCKED_ONLY(infomask))
{
- HTSU_Result res;
+ HTSU_Result res;
res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
GetCurrentTransactionId(),
* for xmax change, and start over if so.
*/
if (!(tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
- !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
- xwait))
+ !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
+ xwait))
goto l3;
/*
* Of course, the multixact might not be done here: if we're
* requesting a light lock mode, other transactions with light
* locks could still be alive, as well as locks owned by our
- * own xact or other subxacts of this backend. We need to
+ * own xact or other subxacts of this backend. We need to
* preserve the surviving MultiXact members. Note that it
* isn't absolutely necessary in the latter case, but doing so
* is simpler.
if (follow_updates &&
!HEAP_XMAX_IS_LOCKED_ONLY(infomask))
{
- HTSU_Result res;
+ HTSU_Result res;
res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
GetCurrentTransactionId(),
/*
* xwait is done, but if xwait had just locked the tuple then
* some other xact could update this tuple before we get to
- * this point. Check for xmax change, and start over if so.
+ * this point. Check for xmax change, and start over if so.
*/
if ((tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
- !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
- xwait))
+ !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
+ xwait))
goto l3;
/*
- * Otherwise check if it committed or aborted. Note we cannot
+ * Otherwise check if it committed or aborted. Note we cannot
* be here if the tuple was only locked by somebody who didn't
* conflict with us; that should have been handled above. So
* that transaction must necessarily be gone by now.
* for cases where it is a plain TransactionId.
*
* Note in particular that this covers the case where we already hold
- * exclusive lock on the tuple and the caller only wants key share or share
- * lock. It would certainly not do to give up the exclusive lock.
+ * exclusive lock on the tuple and the caller only wants key share or
+ * share lock. It would certainly not do to give up the exclusive lock.
*/
if (!(old_infomask & (HEAP_XMAX_INVALID |
HEAP_XMAX_COMMITTED |
}
/*
- * If this is the first possibly-multixact-able operation in the
- * current transaction, set my per-backend OldestMemberMXactId setting.
- * We can be certain that the transaction will never become a member of
- * any older MultiXactIds than that. (We have to do this even if we
- * end up just using our own TransactionId below, since some other
- * backend could incorporate our XID into a MultiXact immediately
- * afterwards.)
+ * If this is the first possibly-multixact-able operation in the current
+ * transaction, set my per-backend OldestMemberMXactId setting. We can be
+ * certain that the transaction will never become a member of any older
+ * MultiXactIds than that. (We have to do this even if we end up just
+ * using our own TransactionId below, since some other backend could
+ * incorporate our XID into a MultiXact immediately afterwards.)
*/
MultiXactIdSetOldestMember();
HeapTupleHeaderSetXmax(tuple->t_data, xid);
/*
- * Make sure there is no forward chain link in t_ctid. Note that in the
+ * Make sure there is no forward chain link in t_ctid. Note that in the
* cases where the tuple has been updated, we must not overwrite t_ctid,
* because it was set by the updater. Moreover, if the tuple has been
- * updated, we need to follow the update chain to lock the new versions
- * of the tuple as well.
+ * updated, we need to follow the update chain to lock the new versions of
+ * the tuple as well.
*/
if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
tuple->t_data->t_ctid = *tid;
TransactionId *result_xmax, uint16 *result_infomask,
uint16 *result_infomask2)
{
- TransactionId new_xmax;
- uint16 new_infomask,
- new_infomask2;
+ TransactionId new_xmax;
+ uint16 new_infomask,
+ new_infomask2;
l5:
new_infomask = 0;
}
else if (old_infomask & HEAP_XMAX_IS_MULTI)
{
- MultiXactStatus new_status;
+ MultiXactStatus new_status;
/*
- * Currently we don't allow XMAX_COMMITTED to be set for multis,
- * so cross-check.
+ * Currently we don't allow XMAX_COMMITTED to be set for multis, so
+ * cross-check.
*/
Assert(!(old_infomask & HEAP_XMAX_COMMITTED));
/*
* If the XMAX is already a MultiXactId, then we need to expand it to
- * include add_to_xmax; but if all the members were lockers and are all
- * gone, we can do away with the IS_MULTI bit and just set add_to_xmax
- * as the only locker/updater. If all lockers are gone and we have an
- * updater that aborted, we can also do without a multi.
+ * include add_to_xmax; but if all the members were lockers and are
+ * all gone, we can do away with the IS_MULTI bit and just set
+ * add_to_xmax as the only locker/updater. If all lockers are gone
+ * and we have an updater that aborted, we can also do without a
+ * multi.
*
* The cost of doing GetMultiXactIdMembers would be paid by
* MultiXactIdExpand if we weren't to do this, so this check is not
* It's a committed update, so we need to preserve him as updater of
* the tuple.
*/
- MultiXactStatus status;
- MultiXactStatus new_status;
+ MultiXactStatus status;
+ MultiXactStatus new_status;
if (old_infomask2 & HEAP_KEYS_UPDATED)
status = MultiXactStatusUpdate;
status = MultiXactStatusNoKeyUpdate;
new_status = get_mxact_status_for_lock(mode, is_update);
+
/*
* since it's not running, it's obviously impossible for the old
* updater to be identical to the current one, so we need not check
* create a new MultiXactId that includes both the old locker or
* updater and our own TransactionId.
*/
- MultiXactStatus status;
- MultiXactStatus new_status;
+ MultiXactStatus status;
+ MultiXactStatus new_status;
if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
{
{
/*
* LOCK_ONLY can be present alone only when a page has been
- * upgraded by pg_upgrade. But in that case,
- * TransactionIdIsInProgress() should have returned false. We
+ * upgraded by pg_upgrade. But in that case,
+ * TransactionIdIsInProgress() should have returned false. We
* assume it's no longer locked in this case.
*/
elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
*/
if (xmax == add_to_xmax)
{
- LockTupleMode old_mode = TUPLOCK_from_mxstatus(status);
- bool old_isupd = ISUPDATE_from_mxstatus(status);
+ LockTupleMode old_mode = TUPLOCK_from_mxstatus(status);
+ bool old_isupd = ISUPDATE_from_mxstatus(status);
/*
* We can do this if the new LockTupleMode is higher or equal than
* It's a committed update, so we need to preserve him as updater of the
* tuple.
*/
- MultiXactStatus status;
- MultiXactStatus new_status;
+ MultiXactStatus status;
+ MultiXactStatus new_status;
if (old_infomask2 & HEAP_KEYS_UPDATED)
status = MultiXactStatusUpdate;
status = MultiXactStatusNoKeyUpdate;
new_status = get_mxact_status_for_lock(mode, is_update);
+
/*
* since it's not running, it's obviously impossible for the old
* updater to be identical to the current one, so we need not check
heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid,
LockTupleMode mode)
{
- ItemPointerData tupid;
- HeapTupleData mytup;
- Buffer buf;
- uint16 new_infomask,
- new_infomask2,
- old_infomask;
- TransactionId xmax,
- new_xmax;
+ ItemPointerData tupid;
+ HeapTupleData mytup;
+ Buffer buf;
+ uint16 new_infomask,
+ new_infomask2,
+ old_infomask;
+ TransactionId xmax,
+ new_xmax;
ItemPointerCopy(tid, &tupid);
xmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
/*
- * If this tuple is updated and the key has been modified (or deleted),
- * what we do depends on the status of the updating transaction: if
- * it's live, we sleep until it finishes; if it has committed, we have
- * to fail (i.e. return HeapTupleUpdated); if it aborted, we ignore it.
- * For updates that didn't touch the key, we can just plough ahead.
+ * If this tuple is updated and the key has been modified (or
+ * deleted), what we do depends on the status of the updating
+ * transaction: if it's live, we sleep until it finishes; if it has
+ * committed, we have to fail (i.e. return HeapTupleUpdated); if it
+ * aborted, we ignore it. For updates that didn't touch the key, we
+ * can just plough ahead.
*/
if (!(old_infomask & HEAP_XMAX_INVALID) &&
(mytup.t_data->t_infomask2 & HEAP_KEYS_UPDATED))
{
- TransactionId update_xid;
+ TransactionId update_xid;
/*
* Note: we *must* check TransactionIdIsInProgress before
goto l4;
}
else if (TransactionIdDidAbort(update_xid))
- ; /* okay to proceed */
+ ; /* okay to proceed */
else if (TransactionIdDidCommit(update_xid))
{
UnlockReleaseBuffer(buf);
{
xl_heap_lock_updated xlrec;
XLogRecPtr recptr;
- XLogRecData rdata[2];
+ XLogRecData rdata[2];
Page page = BufferGetPage(buf);
xlrec.target.node = rel->rd_node;
/* if we find the end of update chain, we're done. */
if (mytup.t_data->t_infomask & HEAP_XMAX_INVALID ||
- ItemPointerEquals(&mytup.t_self, &mytup.t_data->t_ctid) ||
+ ItemPointerEquals(&mytup.t_self, &mytup.t_data->t_ctid) ||
HeapTupleHeaderIsOnlyLocked(mytup.t_data))
{
UnlockReleaseBuffer(buf);
/*
* heap_lock_updated_tuple
- * Follow update chain when locking an updated tuple, acquiring locks (row
- * marks) on the updated versions.
+ * Follow update chain when locking an updated tuple, acquiring locks (row
+ * marks) on the updated versions.
*
* The initial tuple is assumed to be already locked.
*
* This function doesn't check visibility; it just unconditionally marks the
- * tuple(s) as locked. If any tuple in the updated chain is being deleted
+ * tuple(s) as locked. If any tuple in the updated chain is being deleted
* concurrently (or updated with the key being modified), sleep until the
* transaction doing it is finished.
*
{
/*
* If this is the first possibly-multixact-able operation in the
- * current transaction, set my per-backend OldestMemberMXactId setting.
- * We can be certain that the transaction will never become a member of
- * any older MultiXactIds than that. (We have to do this even if we
- * end up just using our own TransactionId below, since some other
- * backend could incorporate our XID into a MultiXact immediately
- * afterwards.)
+ * current transaction, set my per-backend OldestMemberMXactId
+ * setting. We can be certain that the transaction will never become a
+ * member of any older MultiXactIds than that. (We have to do this
+ * even if we end up just using our own TransactionId below, since
+ * some other backend could incorporate our XID into a MultiXact
+ * immediately afterwards.)
*/
MultiXactIdSetOldestMember();
HeapTupleHeaderSetXmax(tuple, InvalidTransactionId);
/*
- * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED
- * + LOCKED. Normalize to INVALID just to be sure no one gets
- * confused. Also get rid of the HEAP_KEYS_UPDATED bit.
+ * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED +
+ * LOCKED. Normalize to INVALID just to be sure no one gets confused.
+ * Also get rid of the HEAP_KEYS_UPDATED bit.
*/
tuple->t_infomask &= ~HEAP_XMAX_BITS;
tuple->t_infomask |= HEAP_XMAX_INVALID;
GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
uint16 *new_infomask2)
{
- int nmembers;
- MultiXactMember *members;
- int i;
- uint16 bits = HEAP_XMAX_IS_MULTI;
- uint16 bits2 = 0;
- bool has_update = false;
- LockTupleMode strongest = LockTupleKeyShare;
+ int nmembers;
+ MultiXactMember *members;
+ int i;
+ uint16 bits = HEAP_XMAX_IS_MULTI;
+ uint16 bits2 = 0;
+ bool has_update = false;
+ LockTupleMode strongest = LockTupleKeyShare;
/*
* We only use this in multis we just created, so they cannot be values
for (i = 0; i < nmembers; i++)
{
- LockTupleMode mode;
+ LockTupleMode mode;
/*
* Remember the strongest lock mode held by any member of the
static TransactionId
MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
{
- TransactionId update_xact = InvalidTransactionId;
- MultiXactMember *members;
- int nmembers;
+ TransactionId update_xact = InvalidTransactionId;
+ MultiXactMember *members;
+ int nmembers;
Assert(!(t_infomask & HEAP_XMAX_LOCK_ONLY));
Assert(t_infomask & HEAP_XMAX_IS_MULTI);
/*
- * Since we know the LOCK_ONLY bit is not set, this cannot be a
- * multi from pre-pg_upgrade.
+ * Since we know the LOCK_ONLY bit is not set, this cannot be a multi from
+ * pre-pg_upgrade.
*/
nmembers = GetMultiXactIdMembers(xmax, &members, false);
if (nmembers > 0)
{
- int i;
+ int i;
for (i = 0; i < nmembers; i++)
{
members[i].status == MultiXactStatusUpdate);
update_xact = members[i].xid;
#ifndef USE_ASSERT_CHECKING
+
/*
* in an assert-enabled build, walk the whole array to ensure
* there's no other updater.
/*
* HeapTupleGetUpdateXid
- * As above, but use a HeapTupleHeader
+ * As above, but use a HeapTupleHeader
*
* See also HeapTupleHeaderGetUpdateXid, which can be used without previously
* checking the hint bits.
/*
* Do_MultiXactIdWait
- * Actual implementation for the two functions below.
+ * Actual implementation for the two functions below.
*
* We do this by sleeping on each member using XactLockTableWait. Any
* members that belong to the current backend are *not* waited for, however;
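The wait loop this comment describes amounts to roughly the following sketch (member expansion and the conditional "nowait" variant are elided; allow_old is passed as false on the assumption the multi is current):

nmembers = GetMultiXactIdMembers(multi, &members, false);
for (i = 0; i < nmembers; i++)
{
	/* no point in waiting for our own (sub)transactions */
	if (TransactionIdIsCurrentTransactionId(members[i].xid))
		continue;

	XactLockTableWait(members[i].xid);
}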
* heap_tuple_needs_freeze
*
* Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
- * are older than the specified cutoff XID or MultiXactId. If so, return TRUE.
+ * are older than the specified cutoff XID or MultiXactId. If so, return TRUE.
*
* It doesn't matter whether the tuple is alive or dead, we are checking
* to see if a tuple needs to be removed or frozen to avoid wraparound.
{
xl_heap_freeze *xlrec = (xl_heap_freeze *) XLogRecGetData(record);
TransactionId cutoff_xid = xlrec->cutoff_xid;
- MultiXactId cutoff_multi = xlrec->cutoff_multi;
+ MultiXactId cutoff_multi = xlrec->cutoff_multi;
Buffer buffer;
Page page;
return;
page = (Page) BufferGetPage(buffer);
- if (lsn <= PageGetLSN(page)) /* changes are applied */
+ if (lsn <= PageGetLSN(page)) /* changes are applied */
{
UnlockReleaseBuffer(buffer);
return;
goto newt;
page = (Page) BufferGetPage(obuffer);
- if (lsn <= PageGetLSN(page)) /* changes are applied */
+ if (lsn <= PageGetLSN(page)) /* changes are applied */
{
if (samepage)
{
return;
page = (Page) BufferGetPage(buffer);
- if (lsn <= PageGetLSN(page)) /* changes are applied */
+ if (lsn <= PageGetLSN(page)) /* changes are applied */
{
UnlockReleaseBuffer(buffer);
return;
heap_xlog_lock_updated(XLogRecPtr lsn, XLogRecord *record)
{
xl_heap_lock_updated *xlrec =
- (xl_heap_lock_updated *) XLogRecGetData(record);
+ (xl_heap_lock_updated *) XLogRecGetData(record);
Buffer buffer;
Page page;
OffsetNumber offnum;
return;
page = (Page) BufferGetPage(buffer);
- if (lsn <= PageGetLSN(page)) /* changes are applied */
+ if (lsn <= PageGetLSN(page)) /* changes are applied */
{
UnlockReleaseBuffer(buffer);
return;
return;
page = (Page) BufferGetPage(buffer);
- if (lsn <= PageGetLSN(page)) /* changes are applied */
+ if (lsn <= PageGetLSN(page)) /* changes are applied */
{
UnlockReleaseBuffer(buffer);
return;
* determine tuple visibility */
TransactionId rs_freeze_xid;/* Xid that will be used as freeze cutoff
* point */
- MultiXactId rs_freeze_multi;/* MultiXactId that will be used as freeze
+ MultiXactId rs_freeze_multi;/* MultiXactId that will be used as freeze
* cutoff point for multixacts */
MemoryContext rs_cxt; /* for hash tables and entries and tuples in
* them */
*/
if (DataChecksumsEnabled())
{
- Page heapPage = BufferGetPage(heapBuf);
+ Page heapPage = BufferGetPage(heapBuf);
/* caller is expected to set PD_ALL_VISIBLE first */
Assert(PageIsAllVisible(heapPage));
START_CRIT_SECTION();
/*
- * We don't do MarkBufferDirty here because we're about to initialise
- * the page, and nobody else can see it yet.
+ * We don't do MarkBufferDirty here because we're about to initialise the
+ * page, and nobody else can see it yet.
*/
/* XLOG stuff */
XLogInsert(RM_BTREE_ID, XLOG_BTREE_REUSE_PAGE, rdata);
/*
- * We don't do PageSetLSN here because we're about to initialise
- * the page, so no need.
+ * We don't do PageSetLSN here because we're about to initialise the
+ * page, so no need.
*/
}
* Note that this code ensures that the items remaining on the
* left page are in the correct item number order, but it does not
* reproduce the physical order they would have had. Is this
- * worth changing? See also _bt_restore_page().
+ * worth changing? See also _bt_restore_page().
*/
Page lpage = (Page) BufferGetPage(lbuf);
BTPageOpaque lopaque = (BTPageOpaque) PageGetSpecialPointer(lpage);
/*
* In what follows, we have to examine the previous state of the index
- * page, as well as the heap page(s) it points to. This is only valid if
+ * page, as well as the heap page(s) it points to. This is only valid if
* WAL replay has reached a consistent database state; which means that
- * the preceding check is not just an optimization, but is *necessary*.
- * We won't have let in any user sessions before we reach consistency.
+ * the preceding check is not just an optimization, but is *necessary*. We
+ * won't have let in any user sessions before we reach consistency.
*/
if (!reachedConsistency)
elog(PANIC, "btree_xlog_delete_get_latestRemovedXid: cannot operate with inconsistent data");
/*
- * Get index page. If the DB is consistent, this should not fail, nor
+ * Get index page. If the DB is consistent, this should not fail, nor
* should any of the heap page fetches below. If one does, we return
- * InvalidTransactionId to cancel all HS transactions. That's probably
+ * InvalidTransactionId to cancel all HS transactions. That's probably
* overkill, but it's safe, and certainly better than panicking here.
*/
ibuffer = XLogReadBuffer(xlrec->node, xlrec->block, false);
/*
* XXX If all heap tuples were LP_DEAD then we will be returning
- * InvalidTransactionId here, causing conflict for all HS
- * transactions. That should happen very rarely (reasoning please?). Also
- * note that caller can't tell the difference between this case and the
- * fast path exit above. May need to change that in future.
+ * InvalidTransactionId here, causing conflict for all HS transactions.
+ * That should happen very rarely (reasoning please?). Also note that
+ * caller can't tell the difference between this case and the fast path
+ * exit above. May need to change that in future.
*/
return latestRemovedXid;
}
* If we have any conflict processing to do, it must happen before we
* update the page.
*
- * Btree delete records can conflict with standby queries. You might
+ * Btree delete records can conflict with standby queries. You might
* think that vacuum records would conflict as well, but we've handled
* that already. XLOG_HEAP2_CLEANUP_INFO records provide the highest xid
* cleaned by the vacuum of the heap and so we can resolve any conflicts
/*-------------------------------------------------------------------------
*
* clogdesc.c
- * rmgr descriptor routines for access/transam/clog.c
+ * rmgr descriptor routines for access/transam/clog.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/clogdesc.c
+ * src/backend/access/rmgrdesc/clogdesc.c
*
*-------------------------------------------------------------------------
*/
/*-------------------------------------------------------------------------
*
* dbasedesc.c
- * rmgr descriptor routines for commands/dbcommands.c
+ * rmgr descriptor routines for commands/dbcommands.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/dbasedesc.c
+ * src/backend/access/rmgrdesc/dbasedesc.c
*
*-------------------------------------------------------------------------
*/
/*-------------------------------------------------------------------------
*
* gindesc.c
- * rmgr descriptor routines for access/transam/gin/ginxlog.c
+ * rmgr descriptor routines for access/transam/gin/ginxlog.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/gindesc.c
+ * src/backend/access/rmgrdesc/gindesc.c
*
*-------------------------------------------------------------------------
*/
/*-------------------------------------------------------------------------
*
* gistdesc.c
- * rmgr descriptor routines for access/gist/gistxlog.c
+ * rmgr descriptor routines for access/gist/gistxlog.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/gistdesc.c
+ * src/backend/access/rmgrdesc/gistdesc.c
*
*-------------------------------------------------------------------------
*/
/*-------------------------------------------------------------------------
*
* hashdesc.c
- * rmgr descriptor routines for access/hash/hash.c
+ * rmgr descriptor routines for access/hash/hash.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/hashdesc.c
+ * src/backend/access/rmgrdesc/hashdesc.c
*
*-------------------------------------------------------------------------
*/
/*-------------------------------------------------------------------------
*
* heapdesc.c
- * rmgr descriptor routines for access/heap/heapam.c
+ * rmgr descriptor routines for access/heap/heapam.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/heapdesc.c
+ * src/backend/access/rmgrdesc/heapdesc.c
*
*-------------------------------------------------------------------------
*/
/*-------------------------------------------------------------------------
*
* mxactdesc.c
- * rmgr descriptor routines for access/transam/multixact.c
+ * rmgr descriptor routines for access/transam/multixact.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/mxactdesc.c
+ * src/backend/access/rmgrdesc/mxactdesc.c
*
*-------------------------------------------------------------------------
*/
/*-------------------------------------------------------------------------
*
* nbtdesc.c
- * rmgr descriptor routines for access/nbtree/nbtxlog.c
+ * rmgr descriptor routines for access/nbtree/nbtxlog.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/nbtdesc.c
+ * src/backend/access/rmgrdesc/nbtdesc.c
*
*-------------------------------------------------------------------------
*/
/*-------------------------------------------------------------------------
*
* relmapdesc.c
- * rmgr descriptor routines for utils/cache/relmapper.c
+ * rmgr descriptor routines for utils/cache/relmapper.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/relmapdesc.c
+ * src/backend/access/rmgrdesc/relmapdesc.c
*
*-------------------------------------------------------------------------
*/
/*-------------------------------------------------------------------------
*
* seqdesc.c
- * rmgr descriptor routines for commands/sequence.c
+ * rmgr descriptor routines for commands/sequence.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/seqdesc.c
+ * src/backend/access/rmgrdesc/seqdesc.c
*
*-------------------------------------------------------------------------
*/
/*-------------------------------------------------------------------------
*
* smgrdesc.c
- * rmgr descriptor routines for catalog/storage.c
+ * rmgr descriptor routines for catalog/storage.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/smgrdesc.c
+ * src/backend/access/rmgrdesc/smgrdesc.c
*
*-------------------------------------------------------------------------
*/
/*-------------------------------------------------------------------------
*
* spgdesc.c
- * rmgr descriptor routines for access/spgist/spgxlog.c
+ * rmgr descriptor routines for access/spgist/spgxlog.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/spgdesc.c
+ * src/backend/access/rmgrdesc/spgdesc.c
*
*-------------------------------------------------------------------------
*/
/*-------------------------------------------------------------------------
*
* standbydesc.c
- * rmgr descriptor routines for storage/ipc/standby.c
+ * rmgr descriptor routines for storage/ipc/standby.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/standbydesc.c
+ * src/backend/access/rmgrdesc/standbydesc.c
*
*-------------------------------------------------------------------------
*/
/*-------------------------------------------------------------------------
*
* tblspcdesc.c
- * rmgr descriptor routines for commands/tablespace.c
+ * rmgr descriptor routines for commands/tablespace.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/tblspcdesc.c
+ * src/backend/access/rmgrdesc/tblspcdesc.c
*
*-------------------------------------------------------------------------
*/
/*-------------------------------------------------------------------------
*
* xactdesc.c
- * rmgr descriptor routines for access/transam/xact.c
+ * rmgr descriptor routines for access/transam/xact.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/xactdesc.c
+ * src/backend/access/rmgrdesc/xactdesc.c
*
*-------------------------------------------------------------------------
*/
/*-------------------------------------------------------------------------
*
* xlogdesc.c
- * rmgr descriptor routines for access/transam/xlog.c
+ * rmgr descriptor routines for access/transam/xlog.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/xlogdesc.c
+ * src/backend/access/rmgrdesc/xlogdesc.c
*
*-------------------------------------------------------------------------
*/
"tli %u; prev tli %u; fpw %s; xid %u/%u; oid %u; multi %u; offset %u; "
"oldest xid %u in DB %u; oldest multi %u in DB %u; "
"oldest running xid %u; %s",
- (uint32) (checkpoint->redo >> 32), (uint32) checkpoint->redo,
+ (uint32) (checkpoint->redo >> 32), (uint32) checkpoint->redo,
checkpoint->ThisTimeLineID,
checkpoint->PrevTimeLineID,
checkpoint->fullPageWrites ? "true" : "false",
}
else if (info == XLOG_HINT)
{
- BkpBlock *bkp = (BkpBlock *) rec;
+ BkpBlock *bkp = (BkpBlock *) rec;
+
appendStringInfo(buf, "page hint: %s block %u",
relpathperm(bkp->node, bkp->fork),
bkp->block);
* imposed by page headers, tuple headers, etc, we leave 100 bytes for that
* (the actual overhead should be no more than 56 bytes at this writing, so
* there is slop in this number). So we can safely create prefixes up to
- * BLCKSZ - 256 * 16 - 100 bytes long. Unfortunately, because 256 * 16 is
+ * BLCKSZ - 256 * 16 - 100 bytes long. Unfortunately, because 256 * 16 is
* already 4K, there is no safe prefix length when BLCKSZ is less than 8K;
* it is always possible to get "SPGiST inner tuple size exceeds maximum"
* if there are too many distinct next-byte values at a given place in the
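To make the arithmetic above concrete (assuming the default BLCKSZ of 8192): the worst case reserves 256 * 16 = 4096 bytes for node labels, plus the 100 bytes of overhead slop, leaving 8192 - 4096 - 100 = 3996 bytes as the longest safe prefix. With BLCKSZ = 4096 the same formula is already negative (4096 - 4096 - 100 = -100), hence the claim that no prefix length is safe below 8 kB.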
*
* The pg_multixact manager is a pg_clog-like manager that stores an array of
* MultiXactMember for each MultiXactId. It is a fundamental part of the
- * shared-row-lock implementation. Each MultiXactMember is comprised of a
+ * shared-row-lock implementation. Each MultiXactMember is comprised of a
* TransactionId and a set of flag bits. The name is a bit historical:
* originally, a MultiXactId consisted of more than one TransactionId (except
* in rare corner cases), hence "multi". Nowadays, however, it's perfectly
* The minimum value in each database is stored in pg_database, and the
* global minimum is part of pg_control. Any vacuum that is able to
* advance its database's minimum value also computes a new global minimum,
- * and uses this value to truncate older segments. When new multixactid
+ * and uses this value to truncate older segments. When new multixactid
* values are to be created, care is taken that the counter does not
* fall within the wraparound horizon considering the global minimum value.
*
* additional flag bits for each TransactionId. To do this without getting
* into alignment issues, we store four bytes of flags, and then the
 * corresponding 4 Xids. Each such 5-word (20-byte) set we call a "group"; groups
- * are stored as a whole in pages. Thus, with 8kB BLCKSZ, we keep 409 groups
+ * are stored as a whole in pages. Thus, with 8kB BLCKSZ, we keep 409 groups
* per page. This wastes 12 bytes per page, but that's OK -- simplicity (and
* performance) trumps space efficiency here.
*
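A sketch of the group layout arithmetic described above; the macro names are illustrative, not necessarily the ones used in multixact.c:

/* Illustrative only: names are assumed, arithmetic follows the comment. */
#define MEMBERS_PER_GROUP		4
#define FLAGBYTES_PER_GROUP		4
/* 4 flag bytes + 4 four-byte TransactionIds = 20 bytes per group */
#define GROUP_SIZE		(FLAGBYTES_PER_GROUP + \
						 MEMBERS_PER_GROUP * sizeof(TransactionId))
/* with 8 kB pages: 8192 / 20 = 409 groups, wasting 8192 - 409 * 20 = 12 bytes */
#define GROUPS_PER_PAGE		(BLCKSZ / GROUP_SIZE)
/* 409 * 4 = 1636 member slots per page */
#define MEMBERS_PER_PAGE	(GROUPS_PER_PAGE * MEMBERS_PER_GROUP)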
MultiXactId lastTruncationPoint;
/*
- * oldest multixact that is still on disk. Anything older than this should
- * not be consulted.
+ * oldest multixact that is still on disk. Anything older than this
+ * should not be consulted.
*/
- MultiXactId oldestMultiXactId;
- Oid oldestMultiXactDB;
+ MultiXactId oldestMultiXactId;
+ Oid oldestMultiXactDB;
/* support for anti-wraparound measures */
- MultiXactId multiVacLimit;
- MultiXactId multiWarnLimit;
- MultiXactId multiStopLimit;
- MultiXactId multiWrapLimit;
+ MultiXactId multiVacLimit;
+ MultiXactId multiWarnLimit;
+ MultiXactId multiStopLimit;
+ MultiXactId multiWrapLimit;
/*
* Per-backend data starts here. We have two arrays stored in the area
* so they will be uninteresting by the time our next transaction starts.
* (XXX not clear that this is correct --- other members of the MultiXact
* could hang around longer than we did. However, it's not clear what a
- * better policy for flushing old cache entries would be.) FIXME actually
+ * better policy for flushing old cache entries would be.) FIXME actually
 * this is plain wrong now that multixacts may contain update Xids.
*
* We allocate the cache entries in a memory context that is deleted at
static MultiXactId GetNewMultiXactId(int nmembers, MultiXactOffset *offset);
/* MultiXact cache management */
-static int mxactMemberComparator(const void *arg1, const void *arg2);
+static int mxactMemberComparator(const void *arg1, const void *arg2);
static MultiXactId mXactCacheGetBySet(int nmembers, MultiXactMember *members);
static int mXactCacheGetById(MultiXactId multi, MultiXactMember **members);
static void mXactCachePut(MultiXactId multi, int nmembers,
multi, xid, mxstatus_to_string(status));
/*
- * Note: we don't allow for old multis here. The reason is that the
- * only caller of this function does a check that the multixact is
- * no longer running.
+ * Note: we don't allow for old multis here. The reason is that the only
+ * caller of this function does a check that the multixact is no longer
+ * running.
*/
nmembers = GetMultiXactIdMembers(multi, &members, false);
if (nmembers < 0)
{
- MultiXactMember member;
+ MultiXactMember member;
/*
* The MultiXactId is obsolete. This can only happen if all the
}
/*
- * Determine which of the members of the MultiXactId are still of interest.
- * This is any running transaction, and also any transaction that grabbed
- * something stronger than just a lock and was committed. (An update that
- * aborted is of no interest here.)
+ * Determine which of the members of the MultiXactId are still of
+ * interest. This is any running transaction, and also any transaction
+ * that grabbed something stronger than just a lock and was committed.
+ * (An update that aborted is of no interest here.)
*
- * (Removing dead members is just an optimization, but a useful one.
- * Note we have the same race condition here as above: j could be 0 at the
- * end of the loop.)
+ * (Removing dead members is just an optimization, but a useful one. Note
+ * we have the same race condition here as above: j could be 0 at the end
+ * of the loop.)
*/
newMembers = (MultiXactMember *)
palloc(sizeof(MultiXactMember) * (nmembers + 1));
/*
* ReadNextMultiXactId
- * Return the next MultiXactId to be assigned, but don't allocate it
+ * Return the next MultiXactId to be assigned, but don't allocate it
*/
MultiXactId
ReadNextMultiXactId(void)
{
- MultiXactId mxid;
+ MultiXactId mxid;
/* XXX we could presumably do this without a lock. */
LWLockAcquire(MultiXactGenLock, LW_SHARED);
/*
* XXX Note: there's a lot of padding space in MultiXactMember. We could
- * find a more compact representation of this Xlog record -- perhaps all the
- * status flags in one XLogRecData, then all the xids in another one? Not
- * clear that it's worth the trouble though.
+ * find a more compact representation of this Xlog record -- perhaps all
+ * the status flags in one XLogRecData, then all the xids in another one?
+ * Not clear that it's worth the trouble though.
*/
rdata[0].data = (char *) (&xlrec);
rdata[0].len = SizeOfMultiXactCreate;
/*----------
* Check to see if it's safe to assign another MultiXactId. This protects
- * against catastrophic data loss due to multixact wraparound. The basic
+ * against catastrophic data loss due to multixact wraparound. The basic
* rules are:
*
* If we're past multiVacLimit, start trying to force autovacuum cycles.
{
/*
* For safety's sake, we release MultiXactGenLock while sending
- * signals, warnings, etc. This is not so much because we care about
+ * signals, warnings, etc. This is not so much because we care about
* preserving concurrency in this situation, as to avoid any
* possibility of deadlock while doing get_database_name(). First,
* copy all the shared values we'll need in this path.
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("database is not accepting commands that generate new MultiXactIds to avoid wraparound data loss in database \"%s\"",
oldest_datname),
- errhint("Execute a database-wide VACUUM in that database.\n"
- "You might also need to commit or roll back old prepared transactions.")));
+ errhint("Execute a database-wide VACUUM in that database.\n"
+ "You might also need to commit or roll back old prepared transactions.")));
else
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("database is not accepting commands that generate new MultiXactIds to avoid wraparound data loss in database with OID %u",
oldest_datoid),
- errhint("Execute a database-wide VACUUM in that database.\n"
- "You might also need to commit or roll back old prepared transactions.")));
+ errhint("Execute a database-wide VACUUM in that database.\n"
+ "You might also need to commit or roll back old prepared transactions.")));
}
else if (!MultiXactIdPrecedes(result, multiWarnLimit))
{
(errmsg("database \"%s\" must be vacuumed before %u more MultiXactIds are used",
oldest_datname,
multiWrapLimit - result),
- errhint("Execute a database-wide VACUUM in that database.\n"
- "You might also need to commit or roll back old prepared transactions.")));
+ errhint("Execute a database-wide VACUUM in that database.\n"
+ "You might also need to commit or roll back old prepared transactions.")));
else
ereport(WARNING,
(errmsg("database with OID %u must be vacuumed before %u more MultiXactIds are used",
oldest_datoid,
multiWrapLimit - result),
- errhint("Execute a database-wide VACUUM in that database.\n"
- "You might also need to commit or roll back old prepared transactions.")));
+ errhint("Execute a database-wide VACUUM in that database.\n"
+ "You might also need to commit or roll back old prepared transactions.")));
}
/* Re-acquire lock and start over */
*
* We don't care about MultiXactId wraparound here; it will be handled by
* the next iteration. But note that nextMXact may be InvalidMultiXactId
- * or the first value on a segment-beginning page after this routine exits,
- * so anyone else looking at the variable must be prepared to deal with
- * either case. Similarly, nextOffset may be zero, but we won't use that
- * as the actual start offset of the next multixact.
+ * or the first value on a segment-beginning page after this routine
+ * exits, so anyone else looking at the variable must be prepared to deal
+ * with either case. Similarly, nextOffset may be zero, but we won't use
+ * that as the actual start offset of the next multixact.
*/
(MultiXactState->nextMXact)++;
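The caveat above implies that any consumer of nextMXact has to normalize the value before use; a minimal sketch, assuming the usual skip-past-reserved-values pattern:

	/* Sketch (assumed pattern): if the counter wrapped into the reserved
	 * range, skip ahead so InvalidMultiXactId is never handed out. */
	if (MultiXactState->nextMXact < FirstMultiXactId)
		MultiXactState->nextMXact = FirstMultiXactId;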
*
* An ID older than MultiXactState->oldestMultiXactId cannot possibly be
* useful; it should have already been frozen by vacuum. We've truncated
- * the on-disk structures anyway. Returning the wrong values could lead to
- * an incorrect visibility result. However, to support pg_upgrade we need
- * to allow an empty set to be returned regardless, if the caller is
+ * the on-disk structures anyway. Returning the wrong values could lead
+ * to an incorrect visibility result. However, to support pg_upgrade we
+ * need to allow an empty set to be returned regardless, if the caller is
* willing to accept it; the caller is expected to check that it's an
* allowed condition (such as ensuring that the infomask bits set on the
- * tuple are consistent with the pg_upgrade scenario). If the caller is
+ * tuple are consistent with the pg_upgrade scenario). If the caller is
* expecting this to be called only on recently created multis, then we
* raise an error.
*
* Conversely, an ID >= nextMXact shouldn't ever be seen here; if it is
- * seen, it implies undetected ID wraparound has occurred. This raises
- * a hard error.
+ * seen, it implies undetected ID wraparound has occurred. This raises a
+ * hard error.
*
* Shared lock is enough here since we aren't modifying any global state.
* Acquire it just long enough to grab the current counter values. We may
{
ereport(allow_old ? DEBUG1 : ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("MultiXactId %u does no longer exist -- apparent wraparound",
- multi)));
+ errmsg("MultiXactId %u does no longer exist -- apparent wraparound",
+ multi)));
return -1;
}
memcpy(ptr, entry->members, size);
debug_elog3(DEBUG2, "CacheGet: found %s",
- mxid_to_string(multi, entry->nmembers, entry->members));
+ mxid_to_string(multi, entry->nmembers, entry->members));
return entry->nmembers;
}
}
char *
mxid_to_string(MultiXactId multi, int nmembers, MultiXactMember *members)
{
- static char *str = NULL;
- StringInfoData buf;
+ static char *str = NULL;
+ StringInfoData buf;
int i;
if (str != NULL)
*
* StartupXLOG has already established nextMXact/nextOffset by calling
* MultiXactSetNextMXact and/or MultiXactAdvanceNextMXact, and the oldestMulti
- * info from pg_control and/or MultiXactAdvanceOldest. Note that we may
+ * info from pg_control and/or MultiXactAdvanceOldest. Note that we may
* already have replayed WAL data into the SLRU files.
*
* We don't need any locks here, really; the SLRU locks are taken
void
SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid)
{
- MultiXactId multiVacLimit;
- MultiXactId multiWarnLimit;
- MultiXactId multiStopLimit;
- MultiXactId multiWrapLimit;
- MultiXactId curMulti;
+ MultiXactId multiVacLimit;
+ MultiXactId multiWarnLimit;
+ MultiXactId multiStopLimit;
+ MultiXactId multiWrapLimit;
+ MultiXactId curMulti;
Assert(MultiXactIdIsValid(oldest_datminmxid));
/*
* The place where we actually get into deep trouble is halfway around
- * from the oldest potentially-existing XID/multi. (This calculation is
+ * from the oldest potentially-existing XID/multi. (This calculation is
* probably off by one or two counts for Xids, because the special XIDs
* reduce the size of the loop a little bit. But we throw in plenty of
* slop below, so it doesn't matter.)
multiStopLimit -= FirstMultiXactId;
/*
- * We'll start complaining loudly when we get within 10M multis of the stop
- * point. This is kind of arbitrary, but if you let your gas gauge get
- * down to 1% of full, would you be looking for the next gas station? We
- * need to be fairly liberal about this number because there are lots of
- * scenarios where most transactions are done by automatic clients that
+ * We'll start complaining loudly when we get within 10M multis of the
+ * stop point. This is kind of arbitrary, but if you let your gas gauge
+ * get down to 1% of full, would you be looking for the next gas station?
+ * We need to be fairly liberal about this number because there are lots
+ * of scenarios where most transactions are done by automatic clients that
* won't pay attention to warnings. (No, we're not gonna make this
* configurable. If you know enough to configure it, you know enough to
* not get in this kind of trouble in the first place.)
multiWarnLimit -= FirstMultiXactId;
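Taken together, the thresholds discussed in these comments form a ladder; a sketch consistent with the surrounding text (the exact margin between the stop and wrap points is an assumption here):

	multiWrapLimit = oldest_datminmxid + (MaxMultiXactId >> 1); /* halfway around */
	multiStopLimit = multiWrapLimit - 100;		/* assumed margin; stop assigning */
	multiWarnLimit = multiStopLimit - 10000000; /* start complaining, per above */
	multiVacLimit = oldest_datminmxid + 200000000;	/* force autovacuum, per below */

Each value is additionally nudged past FirstMultiXactId when the arithmetic wraps into the reserved range, as the adjacent adjustments show.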
/*
- * We'll start trying to force autovacuums when oldest_datminmxid gets
- * to be more than 200 million transactions old.
+ * We'll start trying to force autovacuums when oldest_datminmxid gets to
+ * be more than 200 million transactions old.
*/
multiVacLimit = oldest_datminmxid + 200000000;
if (multiVacLimit < FirstMultiXactId)
/* Log the info */
ereport(DEBUG1,
- (errmsg("MultiXactId wrap limit is %u, limited by database with OID %u",
- multiWrapLimit, oldest_datoid)));
+ (errmsg("MultiXactId wrap limit is %u, limited by database with OID %u",
+ multiWrapLimit, oldest_datoid)));
/*
* If past the autovacuum force point, immediately signal an autovac
MultiXactId
GetOldestMultiXactId(void)
{
- MultiXactId oldestMXact;
- MultiXactId nextMXact;
- int i;
+ MultiXactId oldestMXact;
+ MultiXactId nextMXact;
+ int i;
/*
* This is the oldest valid value among all the OldestMemberMXactId[] and
typedef struct mxtruncinfo
{
- int earliestExistingPage;
+ int earliestExistingPage;
} mxtruncinfo;
/*
* SlruScanDirectory callback
- * This callback determines the earliest existing page number.
+ * This callback determines the earliest existing page number.
*/
static bool
SlruScanDirCbFindEarliest(SlruCtl ctl, char *filename, int segpage, void *data)
{
- mxtruncinfo *trunc = (mxtruncinfo *) data;
+ mxtruncinfo *trunc = (mxtruncinfo *) data;
if (trunc->earliestExistingPage == -1 ||
ctl->PagePrecedes(segpage, trunc->earliestExistingPage))
trunc->earliestExistingPage = segpage;
}
- return false; /* keep going */
+ return false; /* keep going */
}
/*
void
TruncateMultiXact(MultiXactId oldestMXact)
{
- MultiXactOffset oldestOffset;
- mxtruncinfo trunc;
- MultiXactId earliest;
+ MultiXactOffset oldestOffset;
+ mxtruncinfo trunc;
+ MultiXactId earliest;
/*
* Note we can't just plow ahead with the truncation; it's possible that
* there are no segments to truncate, which is a problem because we are
- * going to attempt to read the offsets page to determine where to truncate
- * the members SLRU. So we first scan the directory to determine the
- * earliest offsets page number that we can read without error.
+ * going to attempt to read the offsets page to determine where to
+ * truncate the members SLRU. So we first scan the directory to determine
+ * the earliest offsets page number that we can read without error.
*/
trunc.earliestExistingPage = -1;
SlruScanDirectory(MultiXactOffsetCtl, SlruScanDirCbFindEarliest, &trunc);
return;
/*
- * First, compute the safe truncation point for MultiXactMember.
- * This is the starting offset of the multixact we were passed
- * as MultiXactOffset cutoff.
+ * First, compute the safe truncation point for MultiXactMember. This is
+ * the starting offset of the multixact we were passed as MultiXactOffset
+ * cutoff.
*/
{
int pageno;
else if (info == XLOG_MULTIXACT_CREATE_ID)
{
xl_multixact_create *xlrec =
- (xl_multixact_create *) XLogRecGetData(record);
+ (xl_multixact_create *) XLogRecGetData(record);
TransactionId max_xid;
int i;
{
typedef struct
{
- MultiXactMember *members;
- int nmembers;
- int iter;
+ MultiXactMember *members;
+ int nmembers;
+ int iter;
} mxact;
- MultiXactId mxid = PG_GETARG_UINT32(0);
- mxact *multi;
+ MultiXactId mxid = PG_GETARG_UINT32(0);
+ mxact *multi;
FuncCallContext *funccxt;
if (mxid < FirstMultiXactId)
* <parentTLI> <switchpoint> <reason>
*
* parentTLI ID of the parent timeline
- * switchpoint XLogRecPtr of the WAL position where the switch happened
+ * switchpoint XLogRecPtr of the WAL position where the switch happened
* reason human-readable explanation of why the timeline was changed
*
* The fields are separated by tabs. Lines beginning with # are comments, and
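For concreteness, a hypothetical history file entry might look like this (values invented for illustration; fields separated by tabs):

1	0/9A2F0528	no recovery target specified

A history file for timeline 3 would carry one such line for each ancestor timeline, recording where the switch away from that parent happened.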
{
char path[MAXPGPATH];
char histfname[MAXFNAMELEN];
- TimeLineID tli;
+ TimeLineID tli;
for (tli = begin; tli < end; tli++)
{
errhint("Timeline IDs must be less than child timeline's ID.")));
/*
- * Create one more entry for the "tip" of the timeline, which has no
- * entry in the history file.
+ * Create one more entry for the "tip" of the timeline, which has no entry
+ * in the history file.
*/
entry = (TimeLineHistoryEntry *) palloc(sizeof(TimeLineHistoryEntry));
entry->tli = targetTLI;
/*
* Prefer link() to rename() here just to be really sure that we don't
- * overwrite an existing file. However, there shouldn't be one, so
+ * overwrite an existing file. However, there shouldn't be one, so
* rename() is an acceptable substitute except for the truly paranoid.
*/
#if HAVE_WORKING_LINK
bool
tliInHistory(TimeLineID tli, List *expectedTLEs)
{
- ListCell *cell;
+ ListCell *cell;
foreach(cell, expectedTLEs)
{
TimeLineID
tliOfPointInHistory(XLogRecPtr ptr, List *history)
{
- ListCell *cell;
+ ListCell *cell;
foreach(cell, history)
{
TimeLineHistoryEntry *tle = (TimeLineHistoryEntry *) lfirst(cell);
+
if ((XLogRecPtrIsInvalid(tle->begin) || tle->begin <= ptr) &&
(XLogRecPtrIsInvalid(tle->end) || ptr < tle->end))
{
/* shouldn't happen. */
elog(ERROR, "timeline history was not contiguous");
- return 0; /* keep compiler quiet */
+ return 0; /* keep compiler quiet */
}
/*
if (nextTLI)
*nextTLI = 0;
- foreach (cell, history)
+ foreach(cell, history)
{
TimeLineHistoryEntry *tle = (TimeLineHistoryEntry *) lfirst(cell);
ereport(ERROR,
(errmsg("requested timeline %u is not in this server's history",
tli)));
- return InvalidXLogRecPtr; /* keep compiler quiet */
+ return InvalidXLogRecPtr; /* keep compiler quiet */
}
*
* It's safe to change the delayChkpt flag of our own backend without
* holding the ProcArrayLock, since we're the only one modifying it.
- * This makes checkpoint's determination of which xacts are delayChkpt a
- * bit fuzzy, but it doesn't matter.
+ * This makes checkpoint's determination of which xacts are delayChkpt
+ * a bit fuzzy, but it doesn't matter.
*/
START_CRIT_SECTION();
MyPgXact->delayChkpt = true;
* from the template database, and then commit the transaction. If we
* crash after all the files have been copied but before the commit, you
* have files in the data directory without an entry in pg_database. To
- * minimize the window
- * for that, we use ForceSyncCommit() to rush the commit record to disk as
- * quick as possible. We have the same window during recovery, and forcing
- * an XLogFlush() (which updates minRecoveryPoint during recovery) helps
- * to reduce that problem window, for any user that requested
- * ForceSyncCommit().
+ * minimize the window for that, we use ForceSyncCommit() to rush the
+ * commit record to disk as quickly as possible. We have the same window
+ * during recovery, and forcing an XLogFlush() (which updates
+ * minRecoveryPoint during recovery) helps to reduce that problem window,
+ * for any user that requested ForceSyncCommit().
*/
if (XactCompletionForceSyncCommit(xinfo))
XLogFlush(lsn);
* will switch to using offline XLOG archives as soon as we reach the end of
* WAL in pg_xlog.
*/
-bool ArchiveRecoveryRequested = false;
-bool InArchiveRecovery = false;
+bool ArchiveRecoveryRequested = false;
+bool InArchiveRecovery = false;
/* Was the last xlog file restored from archive, or local? */
static bool restoredFromArchive = false;
/* options taken from recovery.conf for archive recovery */
-char *recoveryRestoreCommand = NULL;
+char *recoveryRestoreCommand = NULL;
static char *recoveryEndCommand = NULL;
static char *archiveCleanupCommand = NULL;
static RecoveryTargetType recoveryTarget = RECOVERY_TARGET_UNSET;
static char *TriggerFile = NULL;
/* are we currently in standby mode? */
-bool StandbyMode = false;
+bool StandbyMode = false;
/* whether request for fast promotion has been made yet */
static bool fast_promote = false;
uint32 ckptXidEpoch; /* nextXID & epoch of latest checkpoint */
TransactionId ckptXid;
XLogRecPtr asyncXactLSN; /* LSN of newest async commit/abort */
- XLogSegNo lastRemovedSegNo; /* latest removed/recycled XLOG segment */
+ XLogSegNo lastRemovedSegNo; /* latest removed/recycled XLOG
+ * segment */
/* Fake LSN counter, for unlogged relations. Protected by ulsn_lck */
- XLogRecPtr unloggedLSN;
+ XLogRecPtr unloggedLSN;
slock_t ulsn_lck;
/* Protected by WALWriteLock: */
*/
typedef enum
{
- XLOG_FROM_ANY = 0, /* request to read WAL from any source */
- XLOG_FROM_ARCHIVE, /* restored using restore_command */
- XLOG_FROM_PG_XLOG, /* existing file in pg_xlog */
- XLOG_FROM_STREAM, /* streamed from master */
+ XLOG_FROM_ANY = 0, /* request to read WAL from any source */
+ XLOG_FROM_ARCHIVE, /* restored using restore_command */
+ XLOG_FROM_PG_XLOG, /* existing file in pg_xlog */
+ XLOG_FROM_STREAM, /* streamed from master */
} XLogSource;
/* human-readable names for XLogSources, for debugging output */
-static const char *xlogSourceNames[] = { "any", "archive", "pg_xlog", "stream" };
+static const char *xlogSourceNames[] = {"any", "archive", "pg_xlog", "stream"};
/*
* openLogFile is -1 or a kernel FD for an open log file segment.
* next.
*/
static XLogSource currentSource = 0; /* XLOG_FROM_* code */
-static bool lastSourceFailed = false;
+static bool lastSourceFailed = false;
typedef struct XLogPageReadPrivate
{
* XLogReceiptSource tracks where we last successfully read some WAL.)
*/
static TimestampTz XLogReceiptTime = 0;
-static XLogSource XLogReceiptSource = 0; /* XLOG_FROM_* code */
+static XLogSource XLogReceiptSource = 0; /* XLOG_FROM_* code */
/* State information for XLOG reading */
static XLogRecPtr ReadRecPtr; /* start of last record read */
static bool XLogCheckBuffer(XLogRecData *rdata, bool holdsExclusiveLock,
XLogRecPtr *lsn, BkpBlock *bkpb);
static Buffer RestoreBackupBlockContents(XLogRecPtr lsn, BkpBlock bkpb,
- char *blk, bool get_cleanup_lock, bool keep_buffer);
+ char *blk, bool get_cleanup_lock, bool keep_buffer);
static bool AdvanceXLInsertBuffer(bool new_segment);
static bool XLogCheckpointNeeded(XLogSegNo new_segno);
static void XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch);
bool use_lock);
static int XLogFileRead(XLogSegNo segno, int emode, TimeLineID tli,
int source, bool notexistOk);
-static int XLogFileReadAnyTLI(XLogSegNo segno, int emode, int source);
+static int XLogFileReadAnyTLI(XLogSegNo segno, int emode, int source);
static int XLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr,
int reqLen, XLogRecPtr targetRecPtr, char *readBuf,
TimeLineID *readTLI);
/* OK, put it in this slot */
dtbuf[i] = rdt->buffer;
if (doPageWrites && XLogCheckBuffer(rdt, true,
- &(dtbuf_lsn[i]), &(dtbuf_xlg[i])))
+ &(dtbuf_lsn[i]), &(dtbuf_xlg[i])))
{
dtbuf_bkp[i] = true;
rdt->data = NULL;
page = BufferGetPage(rdata->buffer);
/*
- * We assume page LSN is first data on *every* page that can be passed
- * to XLogInsert, whether it has the standard page layout or not. We
- * don't need to take the buffer header lock for PageGetLSN if we hold
- * an exclusive lock on the page and/or the relation.
+ * We assume page LSN is first data on *every* page that can be passed to
+ * XLogInsert, whether it has the standard page layout or not. We don't
+ * need to take the buffer header lock for PageGetLSN if we hold an
+ * exclusive lock on the page and/or the relation.
*/
if (holdsExclusiveLock)
*lsn = PageGetLSN(page);
*/
if (LogwrtResult.Write >= XLogCtl->xlblocks[curridx])
elog(PANIC, "xlog write request %X/%X is past end of log %X/%X",
- (uint32) (LogwrtResult.Write >> 32), (uint32) LogwrtResult.Write,
+ (uint32) (LogwrtResult.Write >> 32), (uint32) LogwrtResult.Write,
(uint32) (XLogCtl->xlblocks[curridx] >> 32),
(uint32) XLogCtl->xlblocks[curridx]);
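The (uint32) (x >> 32) / (uint32) x split feeding "%X/%X" recurs throughout this file; a hypothetical helper, not present in this source, could centralize the idiom:

/* Hypothetical convenience macro: expand an XLogRecPtr into the two
 * uint32 halves expected by a "%X/%X" format specifier. */
#define LSN_FORMAT_ARGS(lsn)	((uint32) ((lsn) >> 32)), ((uint32) (lsn))

With it, the PANIC above could be written as elog(PANIC, "xlog write request %X/%X is past end of log %X/%X", LSN_FORMAT_ARGS(LogwrtResult.Write), LSN_FORMAT_ARGS(XLogCtl->xlblocks[curridx])).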
if (lseek(openLogFile, (off_t) startoffset, SEEK_SET) < 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not seek in log file %s to offset %u: %m",
- XLogFileNameP(ThisTimeLineID, openLogSegNo),
- startoffset)));
+ errmsg("could not seek in log file %s to offset %u: %m",
+ XLogFileNameP(ThisTimeLineID, openLogSegNo),
+ startoffset)));
openLogOff = startoffset;
}
if (!force && newMinRecoveryPoint < lsn)
elog(WARNING,
"xlog min recovery request %X/%X is past current point %X/%X",
- (uint32) (lsn >> 32) , (uint32) lsn,
+ (uint32) (lsn >> 32), (uint32) lsn,
(uint32) (newMinRecoveryPoint >> 32),
(uint32) newMinRecoveryPoint);
minRecoveryPointTLI = newMinRecoveryPointTLI;
ereport(DEBUG2,
- (errmsg("updated min recovery point to %X/%X on timeline %u",
- (uint32) (minRecoveryPoint >> 32),
- (uint32) minRecoveryPoint,
- newMinRecoveryPointTLI)));
+ (errmsg("updated min recovery point to %X/%X on timeline %u",
+ (uint32) (minRecoveryPoint >> 32),
+ (uint32) minRecoveryPoint,
+ newMinRecoveryPointTLI)));
}
}
LWLockRelease(ControlFileLock);
elog(LOG, "xlog flush request %X/%X; write %X/%X; flush %X/%X",
(uint32) (record >> 32), (uint32) record,
(uint32) (LogwrtResult.Write >> 32), (uint32) LogwrtResult.Write,
- (uint32) (LogwrtResult.Flush >> 32), (uint32) LogwrtResult.Flush);
+ (uint32) (LogwrtResult.Flush >> 32), (uint32) LogwrtResult.Flush);
#endif
START_CRIT_SECTION();
/*
* Sleep before flush! By adding a delay here, we may give further
* backends the opportunity to join the backlog of group commit
- * followers; this can significantly improve transaction throughput, at
- * the risk of increasing transaction latency.
+ * followers; this can significantly improve transaction throughput,
+ * at the risk of increasing transaction latency.
*
* We do not sleep if enableFsync is not turned on, nor if there are
* fewer than CommitSiblings other backends with active transactions.
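A sketch of the gating this comment describes, assuming CommitDelay and CommitSiblings are the variables behind the commit_delay and commit_siblings settings:

	/* Sketch: only sleep when fsync is enabled and enough other
	 * backends have active transactions for group commit to pay off. */
	if (CommitDelay > 0 && enableFsync &&
		MinimumActiveBackends(CommitSiblings))
		pg_usleep(CommitDelay);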
XLogCtlInsert *Insert = &XLogCtl->Insert;
uint32 freespace = INSERT_FREESPACE(Insert);
- if (freespace == 0) /* buffer is full */
+ if (freespace == 0) /* buffer is full */
WriteRqstPtr = XLogCtl->xlblocks[Insert->curridx];
else
{
elog(ERROR,
"xlog flush request %X/%X is not satisfied --- flushed only to %X/%X",
(uint32) (record >> 32), (uint32) record,
- (uint32) (LogwrtResult.Flush >> 32), (uint32) LogwrtResult.Flush);
+ (uint32) (LogwrtResult.Flush >> 32), (uint32) LogwrtResult.Flush);
}
/*
elog(LOG, "xlog bg flush request %X/%X; write %X/%X; flush %X/%X",
(uint32) (WriteRqstPtr >> 32), (uint32) WriteRqstPtr,
(uint32) (LogwrtResult.Write >> 32), (uint32) LogwrtResult.Write,
- (uint32) (LogwrtResult.Flush >> 32), (uint32) LogwrtResult.Flush);
+ (uint32) (LogwrtResult.Flush >> 32), (uint32) LogwrtResult.Flush);
#endif
START_CRIT_SECTION();
if (fd < 0)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not open file \"%s\": %m", path)));
+ errmsg("could not open file \"%s\": %m", path)));
elog(DEBUG2, "done creating and filling new WAL file");
* want to read.
*
* If we haven't read the timeline history file yet, read it now, so that
- * we know which TLIs to scan. We don't save the list in expectedTLEs,
+ * we know which TLIs to scan. We don't save the list in expectedTLEs,
* however, unless we actually find a valid segment. That way if there is
* neither a timeline history file nor a WAL segment in the archive, and
* streaming replication is set up, we'll read the timeline history file
}
/*
- * The checksum value on this page is currently invalid. We don't
- * need to reset it here since it will be set before being written.
+ * The checksum value on this page is currently invalid. We don't need to
+ * reset it here since it will be set before being written.
*/
PageSetLSN(page, lsn);
for (;;)
{
- char *errormsg;
+ char *errormsg;
record = XLogReadRecord(xlogreader, RecPtr, &errormsg);
ReadRecPtr = xlogreader->ReadRecPtr;
}
/*
- * We only end up here without a message when XLogPageRead() failed
- * - in that case we already logged something.
- * In StandbyMode that only happens if we have been triggered, so
- * we shouldn't loop anymore in that case.
+ * We only end up here without a message when XLogPageRead()
+ * failed - in that case we already logged something. In
+ * StandbyMode that only happens if we have been triggered, so we
+ * shouldn't loop anymore in that case.
*/
if (errormsg)
ereport(emode_for_corrupt_record(emode,
RecPtr ? RecPtr : EndRecPtr),
- (errmsg_internal("%s", errormsg) /* already translated */));
+ (errmsg_internal("%s", errormsg) /* already translated */ ));
}
+
/*
* Check page TLI is one of the expected values.
*/
else if (!tliInHistory(xlogreader->latestPageTLI, expectedTLEs))
{
char fname[MAXFNAMELEN];
- XLogSegNo segno;
- int32 offset;
+ XLogSegNo segno;
+ int32 offset;
XLByteToSeg(xlogreader->latestPagePtr, segno);
offset = xlogreader->latestPagePtr % XLogSegSize;
XLogFileName(fname, xlogreader->readPageTLI, segno);
ereport(emode_for_corrupt_record(emode,
RecPtr ? RecPtr : EndRecPtr),
- (errmsg("unexpected timeline ID %u in log segment %s, offset %u",
- xlogreader->latestPageTLI,
- fname,
- offset)));
+ (errmsg("unexpected timeline ID %u in log segment %s, offset %u",
+ xlogreader->latestPageTLI,
+ fname,
+ offset)));
record = NULL;
}
lastSourceFailed = true;
/*
- * If archive recovery was requested, but we were still doing crash
- * recovery, switch to archive recovery and retry using the offline
- * archive. We have now replayed all the valid WAL in pg_xlog, so
- * we are presumably now consistent.
+ * If archive recovery was requested, but we were still doing
+ * crash recovery, switch to archive recovery and retry using the
+ * offline archive. We have now replayed all the valid WAL in
+ * pg_xlog, so we are presumably now consistent.
*
* We require that there's at least some valid WAL present in
* pg_xlog, however (!fetch_ckpt). We could recover using the WAL
newExpectedTLEs = readTimeLineHistory(newtarget);
/*
- * If the current timeline is not part of the history of the new
- * timeline, we cannot proceed to it.
+ * If the current timeline is not part of the history of the new timeline,
+ * we cannot proceed to it.
*/
found = false;
- foreach (cell, newExpectedTLEs)
+ foreach(cell, newExpectedTLEs)
{
currentTle = (TimeLineHistoryEntry *) lfirst(cell);
XLogRecPtr
GetFakeLSNForUnloggedRel(void)
{
- XLogRecPtr nextUnloggedLSN;
+ XLogRecPtr nextUnloggedLSN;
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
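A minimal sketch of the body, assuming all it must do is bump the counter under the ulsn_lck spinlock that protects unloggedLSN:

	/* Sketch: advance the fake-LSN counter atomically and return the
	 * pre-increment value (assumed to be the whole job here). */
	SpinLockAcquire(&xlogctl->ulsn_lck);
	nextUnloggedLSN = xlogctl->unloggedLSN++;
	SpinLockRelease(&xlogctl->ulsn_lck);

	return nextUnloggedLSN;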
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory"),
- errdetail("Failed while allocating an XLog reading processor")));
+ errdetail("Failed while allocating an XLog reading processor")));
xlogreader->system_identifier = ControlFile->system_identifier;
if (read_backup_label(&checkPointLoc, &backupEndRequired,
&backupFromStandby))
{
/*
- * Archive recovery was requested, and thanks to the backup label file,
- * we know how far we need to replay to reach consistency. Enter
+ * Archive recovery was requested, and thanks to the backup label
+ * file, we know how far we need to replay to reach consistency. Enter
* archive recovery directly.
*/
InArchiveRecovery = true;
wasShutdown = (record->xl_info == XLOG_CHECKPOINT_SHUTDOWN);
ereport(DEBUG1,
(errmsg("checkpoint record is at %X/%X",
- (uint32) (checkPointLoc >> 32), (uint32) checkPointLoc)));
+ (uint32) (checkPointLoc >> 32), (uint32) checkPointLoc)));
InRecovery = true; /* force recovery even if SHUTDOWNED */
/*
/*
* It's possible that archive recovery was requested, but we don't
* know how far we need to replay the WAL before we reach consistency.
- * This can happen for example if a base backup is taken from a running
- * server using an atomic filesystem snapshot, without calling
+ * This can happen for example if a base backup is taken from a
+ * running server using an atomic filesystem snapshot, without calling
* pg_start/stop_backup. Or if you just kill a running master server
* and put it into archive recovery by creating a recovery.conf file.
*
* replaying all the WAL present in pg_xlog, and only enter archive
* recovery after that.
*
- * But usually we already know how far we need to replay the WAL (up to
- * minRecoveryPoint, up to backupEndPoint, or until we see an
+ * But usually we already know how far we need to replay the WAL (up
+ * to minRecoveryPoint, up to backupEndPoint, or until we see an
* end-of-backup record), and we can enter archive recovery directly.
*/
if (ArchiveRecoveryRequested &&
{
ereport(DEBUG1,
(errmsg("checkpoint record is at %X/%X",
- (uint32) (checkPointLoc >> 32), (uint32) checkPointLoc)));
+ (uint32) (checkPointLoc >> 32), (uint32) checkPointLoc)));
}
else if (StandbyMode)
{
{
ereport(LOG,
(errmsg("using previous checkpoint record at %X/%X",
- (uint32) (checkPointLoc >> 32), (uint32) checkPointLoc)));
+ (uint32) (checkPointLoc >> 32), (uint32) checkPointLoc)));
InRecovery = true; /* force recovery even if SHUTDOWNED */
}
else
* timeline in the history of the requested timeline, we cannot proceed:
* the backup is not part of the history of the requested timeline.
*/
- Assert(expectedTLEs); /* was initialized by reading checkpoint record */
+ Assert(expectedTLEs); /* was initialized by reading checkpoint
+ * record */
if (tliOfPointInHistory(checkPointLoc, expectedTLEs) !=
- checkPoint.ThisTimeLineID)
+ checkPoint.ThisTimeLineID)
{
- XLogRecPtr switchpoint;
+ XLogRecPtr switchpoint;
/*
- * tliSwitchPoint will throw an error if the checkpoint's timeline
- * is not in expectedTLEs at all.
+ * tliSwitchPoint will throw an error if the checkpoint's timeline is
+ * not in expectedTLEs at all.
*/
switchpoint = tliSwitchPoint(ControlFile->checkPointCopy.ThisTimeLineID, expectedTLEs, NULL);
ereport(FATAL,
* history, too.
*/
if (!XLogRecPtrIsInvalid(ControlFile->minRecoveryPoint) &&
- tliOfPointInHistory(ControlFile->minRecoveryPoint - 1, expectedTLEs) !=
- ControlFile->minRecoveryPointTLI)
+ tliOfPointInHistory(ControlFile->minRecoveryPoint - 1, expectedTLEs) !=
+ ControlFile->minRecoveryPointTLI)
ereport(FATAL,
(errmsg("requested timeline %u does not contain minimum recovery point %X/%X on timeline %u",
recoveryTargetTLI,
ereport(DEBUG1,
(errmsg("redo record is at %X/%X; shutdown %s",
- (uint32) (checkPoint.redo >> 32), (uint32) checkPoint.redo,
+ (uint32) (checkPoint.redo >> 32), (uint32) checkPoint.redo,
wasShutdown ? "TRUE" : "FALSE")));
ereport(DEBUG1,
(errmsg("next transaction ID: %u/%u; next OID: %u",
ThisTimeLineID = checkPoint.ThisTimeLineID;
/*
- * Copy any missing timeline history files between 'now' and the
- * recovery target timeline from archive to pg_xlog. While we don't need
- * those files ourselves - the history file of the recovery target
- * timeline covers all the previous timelines in the history too - a
- * cascading standby server might be interested in them. Or, if you
- * archive the WAL from this server to a different archive than the
- * master, it'd be good for all the history files to get archived there
- * after failover, so that you can use one of the old timelines as a
- * PITR target. Timeline history files are small, so it's better to copy
- * them unnecessarily than not copy them and regret later.
+ * Copy any missing timeline history files between 'now' and the recovery
+ * target timeline from archive to pg_xlog. While we don't need those
+ * files ourselves - the history file of the recovery target timeline
+ * covers all the previous timelines in the history too - a cascading
+ * standby server might be interested in them. Or, if you archive the WAL
+ * from this server to a different archive than the master, it'd be good
+ * for all the history files to get archived there after failover, so that
+ * you can use one of the old timelines as a PITR target. Timeline history
+ * files are small, so it's better to copy them unnecessarily than not
+ * copy them and regret later.
*/
restoreTimeLineHistoryFiles(ThisTimeLineID, recoveryTargetTLI);
"automatic recovery in progress")));
if (recoveryTargetTLI > ControlFile->checkPointCopy.ThisTimeLineID)
ereport(LOG,
- (errmsg("crash recovery starts in timeline %u "
- "and has target timeline %u",
- ControlFile->checkPointCopy.ThisTimeLineID,
- recoveryTargetTLI)));
+ (errmsg("crash recovery starts in timeline %u "
+ "and has target timeline %u",
+ ControlFile->checkPointCopy.ThisTimeLineID,
+ recoveryTargetTLI)));
ControlFile->state = DB_IN_CRASH_RECOVERY;
}
ControlFile->prevCheckPoint = ControlFile->checkPoint;
ereport(LOG,
(errmsg("redo starts at %X/%X",
- (uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr)));
+ (uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr)));
/*
* main redo apply loop
*/
do
{
- bool switchedTLI = false;
+ bool switchedTLI = false;
+
#ifdef WAL_DEBUG
if (XLOG_DEBUG ||
(rmid == RM_XACT_ID && trace_recovery_messages <= DEBUG2) ||
initStringInfo(&buf);
appendStringInfo(&buf, "REDO @ %X/%X; LSN %X/%X: ",
- (uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr,
- (uint32) (EndRecPtr >> 32), (uint32) EndRecPtr);
+ (uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr,
+ (uint32) (EndRecPtr >> 32), (uint32) EndRecPtr);
xlog_outrec(&buf, record);
appendStringInfo(&buf, " - ");
RmgrTable[record->xl_rmid].rm_desc(&buf,
}
/*
- * Before replaying this record, check if this record
- * causes the current timeline to change. The record is
- * already considered to be part of the new timeline,
- * so we update ThisTimeLineID before replaying it.
- * That's important so that replayEndTLI, which is
- * recorded as the minimum recovery point's TLI if
- * recovery stops after this record, is set correctly.
+ * Before replaying this record, check if this record causes
+ * the current timeline to change. The record is already
+ * considered to be part of the new timeline, so we update
+ * ThisTimeLineID before replaying it. That's important so
+ * that replayEndTLI, which is recorded as the minimum
+ * recovery point's TLI if recovery stops after this record,
+ * is set correctly.
*/
if (record->xl_rmid == RM_XLOG_ID)
{
}
else if (info == XLOG_END_OF_RECOVERY)
{
- xl_end_of_recovery xlrec;
+ xl_end_of_recovery xlrec;
memcpy(&xlrec, XLogRecGetData(record), sizeof(xl_end_of_recovery));
newTLI = xlrec.ThisTimeLineID;
ereport(LOG,
(errmsg("redo done at %X/%X",
- (uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr)));
+ (uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr)));
xtime = GetLatestXTime();
if (xtime)
ereport(LOG,
PrevTimeLineID = ThisTimeLineID;
if (ArchiveRecoveryRequested)
{
- char reason[200];
+ char reason[200];
Assert(InArchiveRecovery);
* allows some extra error checking in xlog_redo.
*
* In fast promotion, only create a lightweight end-of-recovery record
- * instead of a full checkpoint. A checkpoint is requested later, after
- * we're fully out of recovery mode and already accepting queries.
+ * instead of a full checkpoint. A checkpoint is requested later,
+ * after we're fully out of recovery mode and already accepting
+ * queries.
*/
if (bgwriterLaunched)
{
fast_promoted = true;
/*
- * Insert a special WAL record to mark the end of recovery,
- * since we aren't doing a checkpoint. That means that the
- * checkpointer process may likely be in the middle of a
- * time-smoothed restartpoint and could continue to be for
- * minutes after this. That sounds strange, but the effect
- * is roughly the same and it would be stranger to try to
- * come out of the restartpoint and then checkpoint.
- * We request a checkpoint later anyway, just for safety.
+ * Insert a special WAL record to mark the end of
+ * recovery, since we aren't doing a checkpoint. That
+ * means that the checkpointer process may likely be in
+ * the middle of a time-smoothed restartpoint and could
+ * continue to be for minutes after this. That sounds
+ * strange, but the effect is roughly the same and it
+ * would be stranger to try to come out of the
+ * restartpoint and then checkpoint. We request a
+ * checkpoint later anyway, just for safety.
*/
CreateEndOfRecoveryRecord();
}
if (!fast_promoted)
RequestCheckpoint(CHECKPOINT_END_OF_RECOVERY |
- CHECKPOINT_IMMEDIATE |
- CHECKPOINT_WAIT);
+ CHECKPOINT_IMMEDIATE |
+ CHECKPOINT_WAIT);
}
else
CreateCheckPoint(CHECKPOINT_END_OF_RECOVERY | CHECKPOINT_IMMEDIATE);
}
/*
- * If there were cascading standby servers connected to us, nudge any
- * wal sender processes to notice that we've been promoted.
+ * If there were cascading standby servers connected to us, nudge any wal
+ * sender processes to notice that we've been promoted.
*/
WalSndWakeup();
}
/*
- * Have we passed our safe starting point? Note that minRecoveryPoint
- * is known to be incorrectly set if ControlFile->backupEndRequired,
- * until the XLOG_BACKUP_RECORD arrives to advise us of the correct
+ * Have we passed our safe starting point? Note that minRecoveryPoint is
+ * known to be incorrectly set if ControlFile->backupEndRequired, until
+ * the XLOG_BACKUP_RECORD arrives to advise us of the correct
* minRecoveryPoint. All we know prior to that is that we're not
* consistent yet.
*/
uint32 freespace;
XLogSegNo _logSegNo;
VirtualTransactionId *vxids;
- int nvxids;
+ int nvxids;
/*
* An end-of-recovery checkpoint is really a shutdown checkpoint, just
TRACE_POSTGRESQL_CHECKPOINT_START(flags);
/*
- * In some cases there are groups of actions that must all occur on
- * one side or the other of a checkpoint record. Before flushing the
+ * In some cases there are groups of actions that must all occur on one
+ * side or the other of a checkpoint record. Before flushing the
* checkpoint record we must explicitly wait for any backend currently
* performing those groups of actions.
*
* One example is end of transaction, so we must wait for any transactions
- * that are currently in commit critical sections. If an xact inserted
+ * that are currently in commit critical sections. If an xact inserted
* its commit record into XLOG just before the REDO point, then a crash
* restart from the REDO point would not replay that record, which means
* that our flushing had better include the xact's update of pg_clog. So
vxids = GetVirtualXIDsDelayingChkpt(&nvxids);
if (nvxids > 0)
{
- uint32 nwaits = 0;
+ uint32 nwaits = 0;
do
{
void
CreateEndOfRecoveryRecord(void)
{
- xl_end_of_recovery xlrec;
- XLogRecData rdata;
- XLogRecPtr recptr;
+ xl_end_of_recovery xlrec;
+ XLogRecData rdata;
+ XLogRecPtr recptr;
/* sanity check */
if (!RecoveryInProgress())
XLogFlush(recptr);
/*
- * Update the control file so that crash recovery can follow
- * the timeline changes to this point.
+ * Update the control file so that crash recovery can follow the timeline
+ * changes to this point.
*/
LWLockAcquire(ControlFileLock, LW_EXCLUSIVE);
ControlFile->time = (pg_time_t) xlrec.end_time;
END_CRIT_SECTION();
- LocalXLogInsertAllowed = -1; /* return to "check" state */
+ LocalXLogInsertAllowed = -1; /* return to "check" state */
}
/*
{
ereport(DEBUG2,
(errmsg("skipping restartpoint, already performed at %X/%X",
- (uint32) (lastCheckPoint.redo >> 32), (uint32) lastCheckPoint.redo)));
+ (uint32) (lastCheckPoint.redo >> 32), (uint32) lastCheckPoint.redo)));
UpdateMinRecoveryPoint(InvalidXLogRecPtr, true);
if (flags & CHECKPOINT_IS_SHUTDOWN)
XLogRecPtr endptr;
/*
- * Get the current end of xlog replayed or received, whichever is later.
+ * Get the current end of xlog replayed or received, whichever is
+ * later.
*/
receivePtr = GetWalRcvWriteRecPtr(NULL, NULL);
replayPtr = GetXLogReplayRecPtr(NULL);
_logSegNo--;
/*
- * Update ThisTimeLineID to the timeline we're currently replaying,
- * so that we install any recycled segments on that timeline.
+ * Update ThisTimeLineID to the timeline we're currently replaying, so
+ * that we install any recycled segments on that timeline.
*
* There is no guarantee that the WAL segments will be useful on the
* current timeline; if recovery proceeds to a new timeline right
* It's possible or perhaps even likely that we finish recovery while
* a restartpoint is in progress. That means we may get to this point
* some minutes afterwards. Setting ThisTimeLineID at that time would
- * actually set it backwards, so we don't want that to persist; if
- * we do reset it here, make sure to reset it back afterwards. This
+ * actually set it backwards, so we don't want that to persist; if we
+ * do reset it here, make sure to reset it back afterwards. This
 * doesn't look very clean or principled, but it's the best of about
* five different ways of handling this edge case.
*/
if (RecoveryInProgress())
- (void) GetXLogReplayRecPtr(&ThisTimeLineID);
+ (void) GetXLogReplayRecPtr(&ThisTimeLineID);
RemoveOldXlogFiles(_logSegNo, endptr);
xtime = GetLatestXTime();
ereport((log_checkpoints ? LOG : DEBUG2),
(errmsg("recovery restart point at %X/%X",
- (uint32) (lastCheckPoint.redo >> 32), (uint32) lastCheckPoint.redo),
+ (uint32) (lastCheckPoint.redo >> 32), (uint32) lastCheckPoint.redo),
xtime ? errdetail("last completed transaction was at log time %s",
timestamptz_to_str(xtime)) : 0));
XLogRecPtr
XLogSaveBufferForHint(Buffer buffer)
{
- XLogRecPtr recptr = InvalidXLogRecPtr;
- XLogRecPtr lsn;
+ XLogRecPtr recptr = InvalidXLogRecPtr;
+ XLogRecPtr lsn;
XLogRecData rdata[2];
- BkpBlock bkpb;
+ BkpBlock bkpb;
/*
* Ensure no checkpoint can change our view of RedoRecPtr.
GetRedoRecPtr();
/*
- * Setup phony rdata element for use within XLogCheckBuffer only.
- * We reuse and reset rdata for any actual WAL record insert.
+ * Setup phony rdata element for use within XLogCheckBuffer only. We reuse
+ * and reset rdata for any actual WAL record insert.
*/
rdata[0].buffer = buffer;
rdata[0].buffer_std = true;
*/
if (XLogCheckBuffer(rdata, false, &lsn, &bkpb))
{
- char copied_buffer[BLCKSZ];
- char *origdata = (char *) BufferGetBlock(buffer);
+ char copied_buffer[BLCKSZ];
+ char *origdata = (char *) BufferGetBlock(buffer);
/*
* Copy buffer so we don't have to worry about concurrent hint bit or
*/
memcpy(copied_buffer, origdata, bkpb.hole_offset);
memcpy(copied_buffer + bkpb.hole_offset,
- origdata + bkpb.hole_offset + bkpb.hole_length,
- BLCKSZ - bkpb.hole_offset - bkpb.hole_length);
+ origdata + bkpb.hole_offset + bkpb.hole_length,
+ BLCKSZ - bkpb.hole_offset - bkpb.hole_length);
/*
* Header for backup block.
ereport(PANIC,
(errmsg("unexpected prev timeline ID %u (current timeline ID %u) in checkpoint record",
prevTLI, ThisTimeLineID)));
+
/*
- * The new timeline better be in the list of timelines we expect
- * to see, according to the timeline history. It should also not
- * decrease.
+ * The new timeline better be in the list of timelines we expect to see,
+ * according to the timeline history. It should also not decrease.
*/
if (newTLI < ThisTimeLineID || !tliInHistory(newTLI, expectedTLEs))
ereport(PANIC,
- (errmsg("unexpected timeline ID %u (after %u) in checkpoint record",
- newTLI, ThisTimeLineID)));
+ (errmsg("unexpected timeline ID %u (after %u) in checkpoint record",
+ newTLI, ThisTimeLineID)));
/*
- * If we have not yet reached min recovery point, and we're about
- * to switch to a timeline greater than the timeline of the min
- * recovery point: trouble. After switching to the new timeline,
- * we could not possibly visit the min recovery point on the
- * correct timeline anymore. This can happen if there is a newer
- * timeline in the archive that branched before the timeline the
- * min recovery point is on, and you attempt to do PITR to the
- * new timeline.
+ * If we have not yet reached min recovery point, and we're about to
+ * switch to a timeline greater than the timeline of the min recovery
+ * point: trouble. After switching to the new timeline, we could not
+ * possibly visit the min recovery point on the correct timeline anymore.
+ * This can happen if there is a newer timeline in the archive that
+ * branched before the timeline the min recovery point is on, and you
+ * attempt to do PITR to the new timeline.
*/
if (!XLogRecPtrIsInvalid(minRecoveryPoint) &&
lsn < minRecoveryPoint &&
}
else if (info == XLOG_HINT)
{
- char *data;
- BkpBlock bkpb;
+ char *data;
+ BkpBlock bkpb;
/*
- * Hint bit records contain a backup block stored "inline" in the normal
- * data since the locking when writing hint records isn't sufficient to
- * use the normal backup block mechanism, which assumes exclusive lock
- * on the buffer supplied.
+ * Hint bit records contain a backup block stored "inline" in the
+ * normal data since the locking when writing hint records isn't
+ * sufficient to use the normal backup block mechanism, which assumes
+ * exclusive lock on the buffer supplied.
*
- * Since the only change in these backup block are hint bits, there are
- * no recovery conflicts generated.
+		 * Since the only changes in these backup blocks are hint bits, there
+ * are no recovery conflicts generated.
*
- * This also means there is no corresponding API call for this,
- * so an smgr implementation has no need to implement anything.
- * Which means nothing is needed in md.c etc
+ * This also means there is no corresponding API call for this, so an
+ * smgr implementation has no need to implement anything. Which means
+		 * nothing is needed in md.c etc.
*/
data = XLogRecGetData(record);
memcpy(&bkpb, data, sizeof(BkpBlock));
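
The redo side reads the BkpBlock header out of the record and must then reinflate the page, zero-filling the hole. A hedged standalone sketch of that inverse operation, using an illustrative header struct (not the backend's actual BkpBlock definition) and the same tiny page size as the copy demo above:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define DEMO_BLCKSZ 16

    /* Illustrative stand-in for the BkpBlock header. */
    typedef struct
    {
        uint16_t    hole_offset;
        uint16_t    hole_length;
    } DemoBkpBlock;

    int
    main(void)
    {
        /* Record payload: header followed by the compressed page image. */
        char        record[sizeof(DemoBkpBlock) + DEMO_BLCKSZ];
        DemoBkpBlock bkpb = {4, 4};
        char        page[DEMO_BLCKSZ];

        memcpy(record, &bkpb, sizeof(bkpb));
        memcpy(record + sizeof(bkpb), "AAAABBBBCCCC",
               DEMO_BLCKSZ - bkpb.hole_length);

        /* Redo side: read the header, then reinflate the page. */
        memcpy(&bkpb, record, sizeof(bkpb));
        memcpy(page, record + sizeof(bkpb), bkpb.hole_offset);
        memset(page + bkpb.hole_offset, 0, bkpb.hole_length);
        memcpy(page + bkpb.hole_offset + bkpb.hole_length,
               record + sizeof(bkpb) + bkpb.hole_offset,
               DEMO_BLCKSZ - bkpb.hole_offset - bkpb.hole_length);

        printf("first byte after hole: %c\n",
               page[bkpb.hole_offset + bkpb.hole_length]);
        return 0;
    }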
ereport(PANIC,
(errcode_for_file_access(),
errmsg("could not fsync log segment %s: %m",
- XLogFileNameP(ThisTimeLineID, openLogSegNo))));
+ XLogFileNameP(ThisTimeLineID, openLogSegNo))));
if (get_sync_bit(sync_method) != get_sync_bit(new_sync_method))
XLogFileClose();
}
if (pg_fsync_writethrough(fd) != 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not fsync write-through log file %s: %m",
- XLogFileNameP(ThisTimeLineID, segno))));
+ errmsg("could not fsync write-through log file %s: %m",
+ XLogFileNameP(ThisTimeLineID, segno))));
break;
#endif
#ifdef HAVE_FDATASYNC
XLogFileNameP(TimeLineID tli, XLogSegNo segno)
{
char *result = palloc(MAXFNAMELEN);
+
XLogFileName(result, tli, segno);
return result;
}
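
XLogFileNameP just palloc's a buffer and delegates to XLogFileName, which formats the 24-hex-digit segment name from the timeline and segment number. A standalone approximation of that layout follows, assuming the default 16MB segment size (the constant and helper names are this demo's, not the backend macro):

    #include <stdio.h>

    #define MAXFNAMELEN 64
    /* 0x100000000 / 16MB; assumes the default 16MB segment size. */
    #define SEGMENTS_PER_XLOGID (0x100000000ULL / (16 * 1024 * 1024))

    /* Approximation of the XLogFileName layout: TLI, then segno split in two. */
    static void
    demo_xlog_file_name(char *buf, unsigned int tli, unsigned long long segno)
    {
        snprintf(buf, MAXFNAMELEN, "%08X%08X%08X",
                 tli,
                 (unsigned int) (segno / SEGMENTS_PER_XLOGID),
                 (unsigned int) (segno % SEGMENTS_PER_XLOGID));
    }

    int
    main(void)
    {
        char        name[MAXFNAMELEN];

        demo_xlog_file_name(name, 1, 259);  /* timeline 1, segment 259 */
        printf("%s\n", name);               /* 000000010000000100000003 */
        return 0;
    }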
"%Y-%m-%d %H:%M:%S %Z",
pg_localtime(&stamp_time, log_timezone));
appendStringInfo(&labelfbuf, "START WAL LOCATION: %X/%X (file %s)\n",
- (uint32) (startpoint >> 32), (uint32) startpoint, xlogfilename);
+ (uint32) (startpoint >> 32), (uint32) startpoint, xlogfilename);
appendStringInfo(&labelfbuf, "CHECKPOINT LOCATION: %X/%X\n",
- (uint32) (checkpointloc >> 32), (uint32) checkpointloc);
+ (uint32) (checkpointloc >> 32), (uint32) checkpointloc);
appendStringInfo(&labelfbuf, "BACKUP METHOD: %s\n",
exclusive ? "pg_start_backup" : "streamed");
appendStringInfo(&labelfbuf, "BACKUP FROM: %s\n",
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("WAL generated with full_page_writes=off was replayed "
"during online backup"),
- errhint("This means that the backup being taken on the standby "
- "is corrupt and should not be used. "
+ errhint("This means that the backup being taken on the standby "
+ "is corrupt and should not be used. "
"Enable full_page_writes and run CHECKPOINT on the master, "
- "and then try an online backup again.")));
+ "and then try an online backup again.")));
LWLockAcquire(ControlFileLock, LW_SHARED);
errmsg("could not create file \"%s\": %m",
histfilepath)));
fprintf(fp, "START WAL LOCATION: %X/%X (file %s)\n",
- (uint32) (startpoint >> 32), (uint32) startpoint, startxlogfilename);
+ (uint32) (startpoint >> 32), (uint32) startpoint, startxlogfilename);
fprintf(fp, "STOP WAL LOCATION: %X/%X (file %s)\n",
(uint32) (stoppoint >> 32), (uint32) stoppoint, stopxlogfilename);
/* transfer remaining lines from label to history file */
XLogRecPtr targetRecPtr, char *readBuf, TimeLineID *readTLI)
{
XLogPageReadPrivate *private =
- (XLogPageReadPrivate *) xlogreader->private_data;
+ (XLogPageReadPrivate *) xlogreader->private_data;
int emode = private->emode;
uint32 targetPageOff;
- XLogSegNo targetSegNo PG_USED_FOR_ASSERTS_ONLY;
+ XLogSegNo targetSegNo PG_USED_FOR_ASSERTS_ONLY;
XLByteToSeg(targetPagePtr, targetSegNo);
targetPageOff = targetPagePtr % XLogSegSize;
readOff = targetPageOff;
if (lseek(readFile, (off_t) readOff, SEEK_SET) < 0)
{
- char fname[MAXFNAMELEN];
+ char fname[MAXFNAMELEN];
XLogFileName(fname, curFileTLI, readSegNo);
ereport(emode_for_corrupt_record(emode, targetPagePtr + reqLen),
(errcode_for_file_access(),
- errmsg("could not seek in log segment %s to offset %u: %m",
+ errmsg("could not seek in log segment %s to offset %u: %m",
fname, readOff)));
goto next_record_is_invalid;
}
if (read(readFile, readBuf, XLOG_BLCKSZ) != XLOG_BLCKSZ)
{
- char fname[MAXFNAMELEN];
+ char fname[MAXFNAMELEN];
XLogFileName(fname, curFileTLI, readSegNo);
ereport(emode_for_corrupt_record(emode, targetPagePtr + reqLen),
(errcode_for_file_access(),
- errmsg("could not read from log segment %s, offset %u: %m",
+ errmsg("could not read from log segment %s, offset %u: %m",
fname, readOff)));
goto next_record_is_invalid;
}
bool fetching_ckpt, XLogRecPtr tliRecPtr)
{
static pg_time_t last_fail_time = 0;
- pg_time_t now;
+ pg_time_t now;
/*-------
* Standby mode is implemented by a state machine:
*
- * 1. Read from archive (XLOG_FROM_ARCHIVE)
+ * 1. Read from archive (XLOG_FROM_ARCHIVE)
* 2. Read from pg_xlog (XLOG_FROM_PG_XLOG)
* 3. Check trigger file
* 4. Read from primary server via walreceiver (XLOG_FROM_STREAM)
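
The four-state list above rotates on failure: archive, then pg_xlog, then a trigger-file check, then streaming, and back around. A minimal skeleton of that failure-driven rotation, purely illustrative (a toy simulation, not the real WaitForWALToBecomeAvailable):

    #include <stdio.h>
    #include <stdbool.h>

    /* Illustrative states mirroring the numbered list above. */
    typedef enum
    {
        DEMO_FROM_ARCHIVE,
        DEMO_FROM_PG_XLOG,
        DEMO_FROM_STREAM
    } DemoSource;

    /* Pretend every source fails for two cycles, then archive succeeds. */
    static bool
    try_source(DemoSource src, int attempt)
    {
        return (src == DEMO_FROM_ARCHIVE && attempt >= 2);
    }

    int
    main(void)
    {
        DemoSource  src = DEMO_FROM_ARCHIVE;
        int         attempt = 0;

        for (;;)
        {
            if (try_source(src, attempt))
            {
                printf("got WAL from source %d on attempt %d\n", src, attempt);
                break;
            }

            /* On failure, advance: archive -> pg_xlog -> stream -> archive. */
            switch (src)
            {
                case DEMO_FROM_ARCHIVE:
                    src = DEMO_FROM_PG_XLOG;
                    break;
                case DEMO_FROM_PG_XLOG:
                    /* the real code checks the trigger file here */
                    src = DEMO_FROM_STREAM;
                    break;
                case DEMO_FROM_STREAM:
                    /* the real code sleeps ~5s here to avoid busy-waiting */
                    src = DEMO_FROM_ARCHIVE;
                    attempt++;
                    break;
            }
        }
        return 0;
    }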
for (;;)
{
- int oldSource = currentSource;
+ int oldSource = currentSource;
/*
* First check if we failed to read from the current source, and
break;
case XLOG_FROM_PG_XLOG:
+
/*
- * Check to see if the trigger file exists. Note that we do
- * this only after failure, so when you create the trigger
- * file, we still finish replaying as much as we can from
- * archive and pg_xlog before failover.
+ * Check to see if the trigger file exists. Note that we
+ * do this only after failure, so when you create the
+ * trigger file, we still finish replaying as much as we
+ * can from archive and pg_xlog before failover.
*/
if (StandbyMode && CheckForStandbyTrigger())
{
}
/*
- * Not in standby mode, and we've now tried the archive and
- * pg_xlog.
+ * Not in standby mode, and we've now tried the archive
+ * and pg_xlog.
*/
if (!StandbyMode)
return false;
/*
- * If primary_conninfo is set, launch walreceiver to try to
- * stream the missing WAL.
+ * If primary_conninfo is set, launch walreceiver to try
+ * to stream the missing WAL.
*
* If fetching_ckpt is TRUE, RecPtr points to the initial
* checkpoint location. In that case, we use RedoStartLSN
*/
if (PrimaryConnInfo)
{
- XLogRecPtr ptr;
- TimeLineID tli;
+ XLogRecPtr ptr;
+ TimeLineID tli;
if (fetching_ckpt)
{
RequestXLogStreaming(tli, ptr, PrimaryConnInfo);
receivedUpto = 0;
}
+
/*
- * Move to XLOG_FROM_STREAM state in either case. We'll get
- * immediate failure if we didn't launch walreceiver, and
- * move on to the next state.
+ * Move to XLOG_FROM_STREAM state in either case. We'll
+ * get immediate failure if we didn't launch walreceiver,
+ * and move on to the next state.
*/
currentSource = XLOG_FROM_STREAM;
break;
case XLOG_FROM_STREAM:
+
/*
- * Failure while streaming. Most likely, we got here because
- * streaming replication was terminated, or promotion was
- * triggered. But we also get here if we find an invalid
- * record in the WAL streamed from master, in which case
- * something is seriously wrong. There's little chance that
- * the problem will just go away, but PANIC is not good for
- * availability either, especially in hot standby mode. So,
- * we treat that the same as disconnection, and retry from
- * archive/pg_xlog again. The WAL in the archive should be
- * identical to what was streamed, so it's unlikely that it
- * helps, but one can hope...
+ * Failure while streaming. Most likely, we got here
+ * because streaming replication was terminated, or
+ * promotion was triggered. But we also get here if we
+ * find an invalid record in the WAL streamed from master,
+ * in which case something is seriously wrong. There's
+ * little chance that the problem will just go away, but
+ * PANIC is not good for availability either, especially
+ * in hot standby mode. So, we treat that the same as
+ * disconnection, and retry from archive/pg_xlog again.
+ * The WAL in the archive should be identical to what was
+ * streamed, so it's unlikely that it helps, but one can
+ * hope...
*/
+
/*
* Before we leave XLOG_FROM_STREAM state, make sure that
* walreceiver is not active, so that it won't overwrite
}
/*
- * XLOG_FROM_STREAM is the last state in our state machine,
- * so we've exhausted all the options for obtaining the
- * requested WAL. We're going to loop back and retry from
- * the archive, but if it hasn't been long since last
- * attempt, sleep 5 seconds to avoid busy-waiting.
+ * XLOG_FROM_STREAM is the last state in our state
+ * machine, so we've exhausted all the options for
+ * obtaining the requested WAL. We're going to loop back
+ * and retry from the archive, but if it hasn't been long
+ * since last attempt, sleep 5 seconds to avoid
+ * busy-waiting.
*/
now = (pg_time_t) time(NULL);
if ((now - last_fail_time) < 5)
else if (currentSource == XLOG_FROM_PG_XLOG)
{
/*
- * We just successfully read a file in pg_xlog. We prefer files
- * in the archive over ones in pg_xlog, so try the next file
- * again from the archive first.
+ * We just successfully read a file in pg_xlog. We prefer files in
+ * the archive over ones in pg_xlog, so try the next file again
+ * from the archive first.
*/
if (InArchiveRecovery)
currentSource = XLOG_FROM_ARCHIVE;
break;
case XLOG_FROM_STREAM:
- {
- bool havedata;
-
- /*
- * Check if WAL receiver is still active.
- */
- if (!WalRcvStreaming())
- {
- lastSourceFailed = true;
- break;
- }
-
- /*
- * Walreceiver is active, so see if new data has arrived.
- *
- * We only advance XLogReceiptTime when we obtain fresh WAL
- * from walreceiver and observe that we had already processed
- * everything before the most recent "chunk" that it flushed to
- * disk. In steady state where we are keeping up with the
- * incoming data, XLogReceiptTime will be updated on each cycle.
- * When we are behind, XLogReceiptTime will not advance, so the
- * grace time allotted to conflicting queries will decrease.
- */
- if (RecPtr < receivedUpto)
- havedata = true;
- else
{
- XLogRecPtr latestChunkStart;
+ bool havedata;
- receivedUpto = GetWalRcvWriteRecPtr(&latestChunkStart, &receiveTLI);
- if (RecPtr < receivedUpto && receiveTLI == curFileTLI)
+ /*
+ * Check if WAL receiver is still active.
+ */
+ if (!WalRcvStreaming())
{
+ lastSourceFailed = true;
+ break;
+ }
+
+ /*
+ * Walreceiver is active, so see if new data has arrived.
+ *
+ * We only advance XLogReceiptTime when we obtain fresh
+ * WAL from walreceiver and observe that we had already
+ * processed everything before the most recent "chunk"
+ * that it flushed to disk. In steady state where we are
+ * keeping up with the incoming data, XLogReceiptTime will
+ * be updated on each cycle. When we are behind,
+ * XLogReceiptTime will not advance, so the grace time
+ * allotted to conflicting queries will decrease.
+ */
+ if (RecPtr < receivedUpto)
havedata = true;
- if (latestChunkStart <= RecPtr)
+ else
+ {
+ XLogRecPtr latestChunkStart;
+
+ receivedUpto = GetWalRcvWriteRecPtr(&latestChunkStart, &receiveTLI);
+ if (RecPtr < receivedUpto && receiveTLI == curFileTLI)
{
- XLogReceiptTime = GetCurrentTimestamp();
- SetCurrentChunkStartTime(XLogReceiptTime);
+ havedata = true;
+ if (latestChunkStart <= RecPtr)
+ {
+ XLogReceiptTime = GetCurrentTimestamp();
+ SetCurrentChunkStartTime(XLogReceiptTime);
+ }
}
+ else
+ havedata = false;
}
- else
- havedata = false;
- }
- if (havedata)
- {
- /*
- * Great, streamed far enough. Open the file if it's not
- * open already. Also read the timeline history file if
- * we haven't initialized timeline history yet; it should
- * be streamed over and present in pg_xlog by now. Use
- * XLOG_FROM_STREAM so that source info is set correctly
- * and XLogReceiptTime isn't changed.
- */
- if (readFile < 0)
+ if (havedata)
{
- if (!expectedTLEs)
- expectedTLEs = readTimeLineHistory(receiveTLI);
- readFile = XLogFileRead(readSegNo, PANIC,
- receiveTLI,
- XLOG_FROM_STREAM, false);
- Assert(readFile >= 0);
+ /*
+ * Great, streamed far enough. Open the file if it's
+ * not open already. Also read the timeline history
+ * file if we haven't initialized timeline history
+ * yet; it should be streamed over and present in
+ * pg_xlog by now. Use XLOG_FROM_STREAM so that
+ * source info is set correctly and XLogReceiptTime
+ * isn't changed.
+ */
+ if (readFile < 0)
+ {
+ if (!expectedTLEs)
+ expectedTLEs = readTimeLineHistory(receiveTLI);
+ readFile = XLogFileRead(readSegNo, PANIC,
+ receiveTLI,
+ XLOG_FROM_STREAM, false);
+ Assert(readFile >= 0);
+ }
+ else
+ {
+ /* just make sure source info is correct... */
+ readSource = XLOG_FROM_STREAM;
+ XLogReceiptSource = XLOG_FROM_STREAM;
+ return true;
+ }
+ break;
}
- else
+
+ /*
+ * Data not here yet. Check for trigger, then wait for
+ * walreceiver to wake us up when new WAL arrives.
+ */
+ if (CheckForStandbyTrigger())
{
- /* just make sure source info is correct... */
- readSource = XLOG_FROM_STREAM;
- XLogReceiptSource = XLOG_FROM_STREAM;
- return true;
+ /*
+ * Note that we don't "return false" immediately here.
+ * After being triggered, we still want to replay all
+ * the WAL that was already streamed. It's in pg_xlog
+ * now, so we just treat this as a failure, and the
+ * state machine will move on to replay the streamed
+ * WAL from pg_xlog, and then recheck the trigger and
+ * exit replay.
+ */
+ lastSourceFailed = true;
+ break;
}
- break;
- }
- /*
- * Data not here yet. Check for trigger, then wait for
- * walreceiver to wake us up when new WAL arrives.
- */
- if (CheckForStandbyTrigger())
- {
/*
- * Note that we don't "return false" immediately here.
- * After being triggered, we still want to replay all the
- * WAL that was already streamed. It's in pg_xlog now, so
- * we just treat this as a failure, and the state machine
- * will move on to replay the streamed WAL from pg_xlog,
- * and then recheck the trigger and exit replay.
+ * Wait for more WAL to arrive. Time out after 5 seconds,
+ * like when polling the archive, to react to a trigger
+ * file promptly.
*/
- lastSourceFailed = true;
+ WaitLatch(&XLogCtl->recoveryWakeupLatch,
+ WL_LATCH_SET | WL_TIMEOUT,
+ 5000L);
+ ResetLatch(&XLogCtl->recoveryWakeupLatch);
break;
}
- /*
- * Wait for more WAL to arrive. Time out after 5 seconds, like
- * when polling the archive, to react to a trigger file
- * promptly.
- */
- WaitLatch(&XLogCtl->recoveryWakeupLatch,
- WL_LATCH_SET | WL_TIMEOUT,
- 5000L);
- ResetLatch(&XLogCtl->recoveryWakeupLatch);
- break;
- }
-
default:
elog(ERROR, "unexpected WAL source %d", currentSource);
}
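
The streaming branch above only advances XLogReceiptTime when fresh WAL arrives and replay had already consumed everything before the newest flushed chunk. A hedged standalone model of that decision (simplified: it ignores the timeline check, and all names are this demo's, not backend APIs):

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t DemoRecPtr;

    /*
     * Data is available when the wanted pointer is below what the
     * (simulated) walreceiver has flushed; the receipt clock only
     * advances when we were caught up past the start of the newest
     * flushed chunk.
     */
    static bool
    demo_have_data(DemoRecPtr wanted, DemoRecPtr flushed_upto,
                   DemoRecPtr latest_chunk_start, bool *advance_clock)
    {
        *advance_clock = false;
        if (wanted < flushed_upto)
        {
            if (latest_chunk_start <= wanted)
                *advance_clock = true;  /* we were keeping up */
            return true;
        }
        return false;
    }

    int
    main(void)
    {
        bool        advance;

        /* Caught up: wanted lies inside the newest chunk -> clock moves. */
        printf("%d %d\n", demo_have_data(150, 200, 100, &advance), advance);
        /* Behind: wanted predates the newest chunk -> clock stays put. */
        printf("%d %d\n", demo_have_data(50, 200, 100, &advance), advance);
        return 0;
    }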
if (IsPromoteTriggered())
{
/*
- * In 9.1 and 9.2 the postmaster unlinked the promote file
- * inside the signal handler. We now leave the file in place
- * and let the Startup process do the unlink. This allows
- * Startup to know whether we're doing fast or normal
- * promotion. Fast promotion takes precedence.
+ * In 9.1 and 9.2 the postmaster unlinked the promote file inside the
+ * signal handler. We now leave the file in place and let the Startup
+ * process do the unlink. This allows Startup to know whether we're
+ * doing fast or normal promotion. Fast promotion takes precedence.
*/
if (stat(FAST_PROMOTE_SIGNAL_FILE, &stat_buf) == 0)
{
* of log segments that weren't yet transferred to the archive.
*
* Notice that we don't actually overwrite any files when we copy back
- * from archive because the restore_command may inadvertently
- * restore inappropriate xlogs, or they may be corrupt, so we may wish to
- * fallback to the segments remaining in current XLOGDIR later. The
+ * from archive because the restore_command may inadvertently restore
+	 * inappropriate xlogs, or they may be corrupt, so we may wish to fall back
+ * to the segments remaining in current XLOGDIR later. The
* copy-from-archive filename is always the same, ensuring that we don't
* run out of disk space on long recoveries.
*/
if (stat(xlogfpath, &statbuf) == 0)
{
- char oldpath[MAXPGPATH];
+ char oldpath[MAXPGPATH];
+
#ifdef WIN32
static unsigned int deletedcounter = 1;
+
/*
- * On Windows, if another process (e.g a walsender process) holds
- * the file open in FILE_SHARE_DELETE mode, unlink will succeed,
- * but the file will still show up in directory listing until the
- * last handle is closed, and we cannot rename the new file in its
- * place until that. To avoid that problem, rename the old file to
- * a temporary name first. Use a counter to create a unique
- * filename, because the same file might be restored from the
- * archive multiple times, and a walsender could still be holding
- * onto an old deleted version of it.
+	 * On Windows, if another process (e.g. a walsender process) holds the
+ * file open in FILE_SHARE_DELETE mode, unlink will succeed, but the
+ * file will still show up in directory listing until the last handle
+	 * is closed, and we cannot rename the new file into its place until
+	 * then. To avoid that problem, rename the old file to a temporary
+ * name first. Use a counter to create a unique filename, because the
+ * same file might be restored from the archive multiple times, and a
+ * walsender could still be holding onto an old deleted version of it.
*/
snprintf(oldpath, MAXPGPATH, "%s.deleted%u",
xlogfpath, deletedcounter++);
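
The snprintf above builds the unique temporary name the comment calls for; the static counter keeps repeated restores of the same segment from colliding. A self-contained sketch of just that naming step (it only prints the name; the real code then rename()s the old file to it):

    #include <stdio.h>

    #define MAXPGPATH 1024

    int
    main(void)
    {
        static unsigned int deletedcounter = 1;
        const char *xlogfpath = "pg_xlog/000000010000000000000003";
        char        oldpath[MAXPGPATH];

        /* Unique ".deleted<n>" name, per the counter pattern above. */
        snprintf(oldpath, MAXPGPATH, "%s.deleted%u",
                 xlogfpath, deletedcounter++);
        printf("would rename to: %s\n", oldpath);
        return 0;
    }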
path, xlogfpath)));
/*
- * Create .done file forcibly to prevent the restored segment from
- * being archived again later.
+ * Create .done file forcibly to prevent the restored segment from being
+ * archived again later.
*/
XLogArchiveForceDone(xlogfname);
/*
- * If the existing file was replaced, since walsenders might have it
- * open, request them to reload a currently-open segment. This is only
- * required for WAL segments, walsenders don't hold other files open, but
- * there's no harm in doing this too often, and we don't know what kind
- * of a file we're dealing with here.
+ * If the existing file was replaced, since walsenders might have it open,
+ * request them to reload a currently-open segment. This is only required
+ * for WAL segments, walsenders don't hold other files open, but there's
+ * no harm in doing this too often, and we don't know what kind of a file
+ * we're dealing with here.
*/
if (reload)
WalSndRqstFileReload();
* XXX: this won't handle values higher than 2^63 correctly.
*/
result = DatumGetNumeric(DirectFunctionCall2(numeric_sub,
- DirectFunctionCall1(int8_numeric, Int64GetDatum((int64) bytes1)),
- DirectFunctionCall1(int8_numeric, Int64GetDatum((int64) bytes2))));
+ DirectFunctionCall1(int8_numeric, Int64GetDatum((int64) bytes1)),
+ DirectFunctionCall1(int8_numeric, Int64GetDatum((int64) bytes2))));
PG_RETURN_NUMERIC(result);
}
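
The function above subtracts two WAL positions via numeric arithmetic, with the XXX caveat about values past 2^63. A hedged standalone sketch of the underlying idea, parsing the textual "%X/%X" location form into a 64-bit value and diffing it (plain C integers instead of the numeric machinery; the same signed-overflow caveat applies):

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    /* Parse the textual "%X/%X" WAL location form into a 64-bit value. */
    static int
    parse_lsn(const char *str, uint64_t *lsn)
    {
        uint32_t    hi,
                    lo;

        if (sscanf(str, "%" SCNx32 "/%" SCNx32, &hi, &lo) != 2)
            return -1;
        *lsn = ((uint64_t) hi << 32) | lo;
        return 0;
    }

    int
    main(void)
    {
        uint64_t    a,
                    b;

        if (parse_lsn("0/2000060", &a) == 0 && parse_lsn("0/2000028", &b) == 0)
            /* Signed difference; like the XXX above, breaks past 2^63. */
            printf("%" PRId64 " bytes\n", (int64_t) (a - b));
        return 0;
    }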
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not read file \"%s\": %m",
- BACKUP_LABEL_FILE)));
+ BACKUP_LABEL_FILE)));
PG_RETURN_NULL();
}
if (ferror(lfp))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read file \"%s\": %m", BACKUP_LABEL_FILE)));
+ errmsg("could not read file \"%s\": %m", BACKUP_LABEL_FILE)));
/* Close the backup label file. */
if (FreeFile(lfp))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not close file \"%s\": %m", BACKUP_LABEL_FILE)));
+ errmsg("could not close file \"%s\": %m", BACKUP_LABEL_FILE)));
if (strlen(backup_start_time) == 0)
ereport(ERROR,
targetRecOff = RecPtr % XLOG_BLCKSZ;
/*
- * Read the page containing the record into state->readBuf. Request
- * enough byte to cover the whole record header, or at least the part of
- * it that fits on the same page.
+ * Read the page containing the record into state->readBuf. Request enough
+ * byte to cover the whole record header, or at least the part of it that
+ * fits on the same page.
*/
readOff = ReadPageInternal(state,
targetPagePtr,
extern int optind;
extern char *optarg;
-uint32 bootstrap_data_checksum_version = 0; /* No checksum */
+uint32 bootstrap_data_checksum_version = 0; /* No checksum */
#define ALLOC(t, c) ((t *) calloc((unsigned)(c), sizeof(t)))
* ----------------
*/
-AuxProcType MyAuxProcType = NotAnAuxProcess; /* declared in miscadmin.h */
+AuxProcType MyAuxProcType = NotAnAuxProcess; /* declared in miscadmin.h */
Relation boot_reldesc; /* current relation descriptor */
/*
* Assign the ProcSignalSlot for an auxiliary process. Since it
* doesn't have a BackendId, the slot is statically allocated based on
- * the auxiliary process type (MyAuxProcType). Backends use slots
+ * the auxiliary process type (MyAuxProcType). Backends use slots
* indexed in the range from 1 to MaxBackends (inclusive), so we use
* MaxBackends + AuxProcType + 1 as the index of the slot for an
* auxiliary process.
void
aclcheck_error_type(AclResult aclerr, Oid typeOid)
{
- Oid element_type = get_element_type(typeOid);
+ Oid element_type = get_element_type(typeOid);
aclcheck_error(aclerr, ACL_KIND_TYPE, format_type_be(element_type ? element_type : typeOid));
}
* This is exported separately because there are cases where we want to use
* an index that will not be recognized by RelationGetOidIndex: TOAST tables
* have indexes that are usable, but have multiple columns and are on
- * ordinary columns rather than a true OID column. This code will work
+ * ordinary columns rather than a true OID column. This code will work
* anyway, so long as the OID is the index's first column. The caller must
* pass in the actual heap attnum of the OID column, however.
*
deleteObjectsInList(ObjectAddresses *targetObjects, Relation *depRel,
int flags)
{
- int i;
+ int i;
/*
* Keep track of objects for event triggers, if necessary.
bool is_validated, bool is_local, int inhcount,
bool is_no_inherit, bool is_internal);
static void StoreConstraints(Relation rel, List *cooked_constraints,
- bool is_internal);
+ bool is_internal);
static bool MergeWithExistingConstraint(Relation rel, char *ccname, Node *expr,
bool allow_merge, bool is_local,
bool is_no_inherit);
* that will do.
*/
new_rel_reltup->relfrozenxid = RecentXmin;
+
/*
* Similarly, initialize the minimum Multixact to the first value that
* could possibly be stored in tuples in the table. Running
/*
* Post creation hook for attribute defaults.
*
- * XXX. ALTER TABLE ALTER COLUMN SET/DROP DEFAULT is implemented
- * with a couple of deletion/creation of the attribute's default entry,
- * so the callee should check existence of an older version of this
- * entry if it needs to distinguish.
+ * XXX. ALTER TABLE ALTER COLUMN SET/DROP DEFAULT is implemented with a
+ * couple of deletion/creation of the attribute's default entry, so the
+ * callee should check existence of an older version of this entry if it
+ * needs to distinguish.
*/
InvokeObjectPostCreateHookArg(AttrDefaultRelationId,
RelationGetRelid(rel), attnum, is_internal);
is_local, /* conislocal */
inhcount, /* coninhcount */
is_no_inherit, /* connoinherit */
- is_internal); /* internally constructed? */
+ is_internal); /* internally constructed? */
pfree(ccbin);
pfree(ccsrc);
Oid namespaceId;
namespaceId = LookupExplicitNamespace(relation->schemaname, missing_ok);
+
/*
- * For missing_ok, allow a non-existant schema name to
- * return InvalidOid.
+			 * For missing_ok, allow a non-existent schema name to
+ * return InvalidOid.
*/
if (namespaceId != myTempNamespace)
ereport(ERROR,
namespaceId = get_namespace_oid(nspname, missing_ok);
if (missing_ok && !OidIsValid(namespaceId))
return InvalidOid;
-
+
aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(), ACL_USAGE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
RunObjectPostCreateHook(Oid classId, Oid objectId, int subId,
bool is_internal)
{
- ObjectAccessPostCreate pc_arg;
+ ObjectAccessPostCreate pc_arg;
/* caller should check, but just in case... */
Assert(object_access_hook != NULL);
memset(&pc_arg, 0, sizeof(ObjectAccessPostCreate));
pc_arg.is_internal = is_internal;
- (*object_access_hook)(OAT_POST_CREATE,
- classId, objectId, subId,
- (void *) &pc_arg);
+ (*object_access_hook) (OAT_POST_CREATE,
+ classId, objectId, subId,
+ (void *) &pc_arg);
}
/*
RunObjectDropHook(Oid classId, Oid objectId, int subId,
int dropflags)
{
- ObjectAccessDrop drop_arg;
+ ObjectAccessDrop drop_arg;
/* caller should check, but just in case... */
Assert(object_access_hook != NULL);
memset(&drop_arg, 0, sizeof(ObjectAccessDrop));
drop_arg.dropflags = dropflags;
- (*object_access_hook)(OAT_DROP,
- classId, objectId, subId,
- (void *) &drop_arg);
+ (*object_access_hook) (OAT_DROP,
+ classId, objectId, subId,
+ (void *) &drop_arg);
}
/*
RunObjectPostAlterHook(Oid classId, Oid objectId, int subId,
Oid auxiliaryId, bool is_internal)
{
- ObjectAccessPostAlter pa_arg;
+ ObjectAccessPostAlter pa_arg;
/* caller should check, but just in case... */
Assert(object_access_hook != NULL);
pa_arg.auxiliary_id = auxiliaryId;
pa_arg.is_internal = is_internal;
- (*object_access_hook)(OAT_POST_ALTER,
- classId, objectId, subId,
- (void *) &pa_arg);
+ (*object_access_hook) (OAT_POST_ALTER,
+ classId, objectId, subId,
+ (void *) &pa_arg);
}
/*
bool
RunNamespaceSearchHook(Oid objectId, bool ereport_on_violation)
{
- ObjectAccessNamespaceSearch ns_arg;
+ ObjectAccessNamespaceSearch ns_arg;
/* caller should check, but just in case... */
Assert(object_access_hook != NULL);
ns_arg.ereport_on_violation = ereport_on_violation;
ns_arg.result = true;
- (*object_access_hook)(OAT_NAMESPACE_SEARCH,
- NamespaceRelationId, objectId, 0,
- (void *) &ns_arg);
+ (*object_access_hook) (OAT_NAMESPACE_SEARCH,
+ NamespaceRelationId, objectId, 0,
+ (void *) &ns_arg);
return ns_arg.result;
}
/* caller should check, but just in case... */
Assert(object_access_hook != NULL);
- (*object_access_hook)(OAT_FUNCTION_EXECUTE,
- ProcedureRelationId, objectId, 0,
- NULL);
+ (*object_access_hook) (OAT_FUNCTION_EXECUTE,
+ ProcedureRelationId, objectId, 0,
+ NULL);
}
AttrNumber attnum_owner; /* attnum of owner field */
AttrNumber attnum_acl; /* attnum of acl field */
AclObjectKind acl_kind; /* ACL_KIND_* of this object type */
- bool is_nsp_name_unique; /* can the nsp/name combination (or name
- * alone, if there's no namespace) be
- * considered an unique identifier for an
- * object of this class? */
+ bool is_nsp_name_unique; /* can the nsp/name combination (or
+ * name alone, if there's no
+								 * namespace) be considered a unique
+ * identifier for an object of this
+ * class? */
} ObjectPropertyType;
static ObjectPropertyType ObjectProperty[] =
ereport(ERROR,
(errmsg_internal("unrecognized class id: %u", class_id)));
- return NULL; /* keep MSC compiler happy */
+ return NULL; /* keep MSC compiler happy */
}
/*
if (oidCacheId > 0)
{
tuple = SearchSysCacheCopy1(oidCacheId, ObjectIdGetDatum(objectId));
- if (!HeapTupleIsValid(tuple)) /* should not happen */
+ if (!HeapTupleIsValid(tuple)) /* should not happen */
return NULL;
}
else
{
Oid oidIndexId = get_object_oid_index(classId);
- SysScanDesc scan;
- ScanKeyData skey;
+ SysScanDesc scan;
+ ScanKeyData skey;
Assert(OidIsValid(oidIndexId));
break;
}
- case OCLASS_EVENT_TRIGGER:
+ case OCLASS_EVENT_TRIGGER:
{
HeapTuple tup;
elog(ERROR, "cache lookup failed for event trigger %u",
object->objectId);
appendStringInfo(&buffer, _("event trigger %s"),
- NameStr(((Form_pg_event_trigger) GETSTRUCT(tup))->evtname));
+ NameStr(((Form_pg_event_trigger) GETSTRUCT(tup))->evtname));
ReleaseSysCache(tup);
break;
}
RelationGetDescr(catalog), &isnull);
if (isnull)
elog(ERROR, "invalid null namespace in object %u/%u/%d",
- address.classId, address.objectId, address.objectSubId);
+ address.classId, address.objectId, address.objectSubId);
}
/*
- * We only return the object name if it can be used (together
- * with the schema name, if any) as an unique identifier.
+ * We only return the object name if it can be used (together with
+		 * the schema name, if any) as a unique identifier.
*/
if (get_object_namensp_unique(address.classId))
{
nameAttnum = get_object_attnum_name(address.classId);
if (nameAttnum != InvalidAttrNumber)
{
- Datum nameDatum;
+ Datum nameDatum;
nameDatum = heap_getattr(objtup, nameAttnum,
- RelationGetDescr(catalog), &isnull);
+ RelationGetDescr(catalog), &isnull);
if (isnull)
elog(ERROR, "invalid null name in object %u/%u/%d",
address.classId, address.objectId, address.objectSubId);
/* schema name */
if (OidIsValid(schema_oid))
{
- const char *schema = quote_identifier(get_namespace_name(schema_oid));
+ const char *schema = quote_identifier(get_namespace_name(schema_oid));
values[1] = CStringGetTextDatum(schema);
nulls[1] = false;
{
Relation constrRel;
HeapTuple constrTup;
- Form_pg_constraint constrForm;
+ Form_pg_constraint constrForm;
constrRel = heap_open(ConstraintRelationId, AccessShareLock);
constrTup = get_catalog_object_by_oid(constrRel, constroid);
Form_pg_proc procForm;
procTup = SearchSysCache1(PROCOID,
- ObjectIdGetDatum(procid));
+ ObjectIdGetDatum(procid));
if (!HeapTupleIsValid(procTup))
elog(ERROR, "cache lookup failed for procedure %u", procid);
procForm = (Form_pg_proc) GETSTRUCT(procTup);
getRelationIdentity(&buffer, object->objectId);
if (object->objectSubId != 0)
{
- char *attr;
+ char *attr;
attr = get_relid_attribute_name(object->objectId,
object->objectSubId);
castForm = (Form_pg_cast) GETSTRUCT(tup);
appendStringInfo(&buffer, "(%s AS %s)",
- format_type_be_qualified(castForm->castsource),
- format_type_be_qualified(castForm->casttarget));
+ format_type_be_qualified(castForm->castsource),
+ format_type_be_qualified(castForm->casttarget));
heap_close(castRel, AccessShareLock);
break;
{
HeapTuple collTup;
Form_pg_collation coll;
- char *schema;
+ char *schema;
collTup = SearchSysCache1(COLLOID,
ObjectIdGetDatum(object->objectId));
schema = get_namespace_name(coll->collnamespace);
appendStringInfoString(&buffer,
quote_qualified_identifier(schema,
- NameStr(coll->collname)));
+ NameStr(coll->collname)));
ReleaseSysCache(collTup);
break;
}
}
else
{
- ObjectAddress domain;
+ ObjectAddress domain;
domain.classId = TypeRelationId;
domain.objectId = con->contypid;
object->objectId);
langForm = (Form_pg_language) GETSTRUCT(langTup);
appendStringInfo(&buffer, "%s",
- quote_identifier(NameStr(langForm->lanname)));
+ quote_identifier(NameStr(langForm->lanname)));
ReleaseSysCache(langTup);
break;
}
appendStringInfo(&buffer,
"%s",
quote_qualified_identifier(schema,
- NameStr(opcForm->opcname)));
+ NameStr(opcForm->opcname)));
appendStringInfo(&buffer, " for %s",
quote_identifier(NameStr(amForm->amname)));
appendStringInfo(&buffer, "operator %d (%s, %s) of %s",
amopForm->amopstrategy,
- format_type_be_qualified(amopForm->amoplefttype),
- format_type_be_qualified(amopForm->amoprighttype),
+ format_type_be_qualified(amopForm->amoplefttype),
+ format_type_be_qualified(amopForm->amoprighttype),
opfam.data);
pfree(opfam.data);
appendStringInfo(&buffer, "function %d (%s, %s) of %s",
amprocForm->amprocnum,
- format_type_be_qualified(amprocForm->amproclefttype),
- format_type_be_qualified(amprocForm->amprocrighttype),
+ format_type_be_qualified(amprocForm->amproclefttype),
+ format_type_be_qualified(amprocForm->amprocrighttype),
opfam.data);
pfree(opfam.data);
case OCLASS_TSPARSER:
{
HeapTuple tup;
- Form_pg_ts_parser formParser;
+ Form_pg_ts_parser formParser;
tup = SearchSysCache1(TSPARSEROID,
ObjectIdGetDatum(object->objectId));
object->objectId);
formParser = (Form_pg_ts_parser) GETSTRUCT(tup);
appendStringInfo(&buffer, "%s",
- quote_identifier(NameStr(formParser->prsname)));
+ quote_identifier(NameStr(formParser->prsname)));
ReleaseSysCache(tup);
break;
}
case OCLASS_TSDICT:
{
HeapTuple tup;
- Form_pg_ts_dict formDict;
+ Form_pg_ts_dict formDict;
tup = SearchSysCache1(TSDICTOID,
ObjectIdGetDatum(object->objectId));
object->objectId);
formDict = (Form_pg_ts_dict) GETSTRUCT(tup);
appendStringInfo(&buffer, "%s",
- quote_identifier(NameStr(formDict->dictname)));
+ quote_identifier(NameStr(formDict->dictname)));
ReleaseSysCache(tup);
break;
}
object->objectId);
formTmpl = (Form_pg_ts_template) GETSTRUCT(tup);
appendStringInfo(&buffer, "%s",
- quote_identifier(NameStr(formTmpl->tmplname)));
+ quote_identifier(NameStr(formTmpl->tmplname)));
ReleaseSysCache(tup);
break;
}
case OCLASS_ROLE:
{
- char *username;
+ char *username;
username = GetUserNameFromId(object->objectId);
appendStringInfo(&buffer, "%s",
appendStringInfo(&buffer,
"for role %s",
- quote_identifier(GetUserNameFromId(defacl->defaclrole)));
+ quote_identifier(GetUserNameFromId(defacl->defaclrole)));
if (OidIsValid(defacl->defaclnamespace))
{
- char *schema;
+ char *schema;
schema = get_namespace_name(defacl->defaclnamespace);
appendStringInfo(&buffer,
object->objectId);
trigForm = (Form_pg_event_trigger) GETSTRUCT(tup);
appendStringInfo(&buffer, "%s",
- quote_identifier(NameStr(trigForm->evtname)));
+ quote_identifier(NameStr(trigForm->evtname)));
ReleaseSysCache(tup);
break;
}
*/
void
AlterConstraintNamespaces(Oid ownerId, Oid oldNspId,
- Oid newNspId, bool isType, ObjectAddresses *objsMoved)
+ Oid newNspId, bool isType, ObjectAddresses *objsMoved)
{
Relation conRel;
ScanKeyData key[1];
while (HeapTupleIsValid((tup = systable_getnext(scan))))
{
Form_pg_constraint conform = (Form_pg_constraint) GETSTRUCT(tup);
- ObjectAddress thisobj;
+ ObjectAddress thisobj;
thisobj.classId = ConstraintRelationId;
thisobj.objectId = HeapTupleGetOid(tup);
const char *newVal,
const char *neighbor,
bool newValIsAfter,
- bool skipIfExists)
+ bool skipIfExists)
{
Relation pg_enum;
Oid newOid;
return false;
/*
- * For SQL standard compatibility, '+' and '-' cannot be the last char of a
- * multi-char operator unless the operator contains chars that are not in
- * SQL operators. The idea is to lex '=-' as two operators, but not to
- * forbid operator names like '?-' that could not be sequences of standard SQL
- * operators.
+ * For SQL standard compatibility, '+' and '-' cannot be the last char of
+ * a multi-char operator unless the operator contains chars that are not
+ * in SQL operators. The idea is to lex '=-' as two operators, but not to
+ * forbid operator names like '?-' that could not be sequences of standard
+ * SQL operators.
*/
if (len > 1 &&
(name[len - 1] == '+' ||
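
(The condition continues in the original source beyond this hunk.) A standalone approximation of the rule the comment states: '+' or '-' may end a multi-character operator name only if the name also contains some character that cannot appear in a standard SQL operator. The exact character set used below is an assumption for this demo, not the lexer's table:

    #include <stdio.h>
    #include <string.h>
    #include <stdbool.h>

    static bool
    demo_op_name_ok(const char *name)
    {
        size_t      len = strlen(name);

        if (len > 1 && (name[len - 1] == '+' || name[len - 1] == '-'))
        {
            /* look for a char outside the standard SQL operator set */
            for (size_t i = 0; i < len; i++)
                if (strchr("+-*/<>=", name[i]) == NULL)
                    return true;
            return false;
        }
        return true;
    }

    int
    main(void)
    {
        printf("=-  -> %d\n", demo_op_name_ok("=-"));   /* 0: lexes as two ops */
        printf("?-  -> %d\n", demo_op_name_ok("?-"));   /* 1: '?' is non-SQL */
        return 0;
    }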
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("cannot change return type of existing function"),
errhint("Use DROP FUNCTION %s first.",
- format_procedure(HeapTupleGetOid(oldtup)))));
+ format_procedure(HeapTupleGetOid(oldtup)))));
/*
* If it returns RECORD, check for possible change of record type
errmsg("cannot change return type of existing function"),
errdetail("Row type defined by OUT parameters is different."),
errhint("Use DROP FUNCTION %s first.",
- format_procedure(HeapTupleGetOid(oldtup)))));
+ format_procedure(HeapTupleGetOid(oldtup)))));
}
/*
errmsg("cannot change name of input parameter \"%s\"",
old_arg_names[j]),
errhint("Use DROP FUNCTION %s first.",
- format_procedure(HeapTupleGetOid(oldtup)))));
+ format_procedure(HeapTupleGetOid(oldtup)))));
}
}
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("cannot remove parameter defaults from existing function"),
errhint("Use DROP FUNCTION %s first.",
- format_procedure(HeapTupleGetOid(oldtup)))));
+ format_procedure(HeapTupleGetOid(oldtup)))));
proargdefaults = SysCacheGetAttr(PROCNAMEARGSNSP, oldtup,
Anum_pg_proc_proargdefaults,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("cannot change data type of existing parameter default value"),
errhint("Use DROP FUNCTION %s first.",
- format_procedure(HeapTupleGetOid(oldtup)))));
+ format_procedure(HeapTupleGetOid(oldtup)))));
newlc = lnext(newlc);
}
}
AlterEventTriggerOwner_oid(sdepForm->objid, newrole);
break;
- /* Generic alter owner cases */
+ /* Generic alter owner cases */
case CollationRelationId:
case ConversionRelationId:
case OperatorRelationId:
smgrcreate(reln, MAIN_FORKNUM, true);
/*
- * Before we perform the truncation, update minimum recovery point
- * to cover this WAL record. Once the relation is truncated, there's
- * no going back. The buffer manager enforces the WAL-first rule
- * for normal updates to relation files, so that the minimum recovery
- * point is always updated before the corresponding change in the
- * data file is flushed to disk. We have to do the same manually
- * here.
+ * Before we perform the truncation, update minimum recovery point to
+ * cover this WAL record. Once the relation is truncated, there's no
+ * going back. The buffer manager enforces the WAL-first rule for
+ * normal updates to relation files, so that the minimum recovery
+ * point is always updated before the corresponding change in the data
+ * file is flushed to disk. We have to do the same manually here.
*
* Doing this before the truncation means that if the truncation fails
* for some reason, you cannot start up the system even after restart,
/*
* Most of the argument-checking is done inside of AggregateCreate
*/
- return AggregateCreate(aggName, /* aggregate name */
+ return AggregateCreate(aggName, /* aggregate name */
aggNamespace, /* namespace */
- aggArgTypes, /* input data type(s) */
+ aggArgTypes, /* input data type(s) */
numArgs,
transfuncName, /* step function name */
finalfuncName, /* final function name */
sortoperatorName, /* sort operator name */
- transTypeId, /* transition data type */
+ transTypeId, /* transition data type */
initval); /* initial condition */
}
#include "utils/tqual.h"
-static Oid AlterObjectNamespace_internal(Relation rel, Oid objid, Oid nspOid);
+static Oid AlterObjectNamespace_internal(Relation rel, Oid objid, Oid nspOid);
/*
* Raise an error to the effect that an object of the given name is already
static void
report_name_conflict(Oid classId, const char *name)
{
- char *msgfmt;
+ char *msgfmt;
switch (classId)
{
static void
report_namespace_conflict(Oid classId, const char *name, Oid nspOid)
{
- char *msgfmt;
+ char *msgfmt;
Assert(OidIsValid(nspOid));
}
/*
- * Check for duplicate name (more friendly than unique-index failure).
- * Since this is just a friendliness check, we can just skip it in cases
- * where there isn't suitable support.
- */
+ * Check for duplicate name (more friendly than unique-index failure).
+ * Since this is just a friendliness check, we can just skip it in cases
+ * where there isn't suitable support.
+ */
if (classId == ProcedureRelationId)
{
Form_pg_proc proc = (Form_pg_proc) GETSTRUCT(oldtup);
case OBJECT_TSPARSER:
case OBJECT_TSTEMPLATE:
{
- ObjectAddress address;
- Relation catalog;
- Relation relation;
+ ObjectAddress address;
+ Relation catalog;
+ Relation relation;
address = get_object_address(stmt->renameType,
stmt->object, stmt->objarg,
default:
elog(ERROR, "unrecognized rename stmt type: %d",
(int) stmt->renameType);
- return InvalidOid; /* keep compiler happy */
+ return InvalidOid; /* keep compiler happy */
}
}
return AlterEventTriggerOwner(strVal(linitial(stmt->object)),
newowner);
- /* Generic cases */
+ /* Generic cases */
case OBJECT_AGGREGATE:
case OBJECT_COLLATION:
case OBJECT_CONVERSION:
Relation catalog;
Relation relation;
Oid classId;
- ObjectAddress address;
+ ObjectAddress address;
address = get_object_address(stmt->objectType,
stmt->object,
/* Superusers can bypass permission checks */
if (!superuser())
{
- AclObjectKind aclkind = get_object_aclkind(classId);
+ AclObjectKind aclkind = get_object_aclkind(classId);
/* must be owner */
if (!has_privs_of_role(GetUserId(), old_ownerId))
{
- char *objname;
- char namebuf[NAMEDATALEN];
+ char *objname;
+ char namebuf[NAMEDATALEN];
if (Anum_name != InvalidAttrNumber)
{
/* New owner must have CREATE privilege on namespace */
if (OidIsValid(namespaceId))
{
- AclResult aclresult;
+ AclResult aclresult;
aclresult = pg_namespace_aclcheck(namespaceId, new_ownerId,
ACL_CREATE);
Anum_acl, RelationGetDescr(rel), &isnull);
if (!isnull)
{
- Acl *newAcl;
+ Acl *newAcl;
newAcl = aclnewowner(DatumGetAclP(datum),
old_ownerId, new_ownerId);
Assert(listenChannels == NIL); /* else caller error */
- if (!amRegisteredListener) /* nothing to do */
+ if (!amRegisteredListener) /* nothing to do */
return;
LWLockAcquire(AsyncQueueLock, LW_SHARED);
/*
* If we LISTEN but then roll back the transaction after PreCommit_Notify,
* we have registered as a listener but have not made any entry in
- * listenChannels. In that case, deregister again.
+ * listenChannels. In that case, deregister again.
*/
if (amRegisteredListener && listenChannels == NIL)
asyncQueueUnregister();
bool is_system_catalog;
bool swap_toast_by_content;
TransactionId frozenXid;
- MultiXactId frozenMulti;
+ MultiXactId frozenMulti;
/* Mark the correct index as clustered */
if (OidIsValid(indexOid))
bool is_system_catalog;
TransactionId OldestXmin;
TransactionId FreezeXid;
- MultiXactId MultiXactFrzLimit;
+ MultiXactId MultiXactFrzLimit;
RewriteState rwstate;
bool use_sort;
Tuplesortstate *tuplesort;
List *force_notnull; /* list of column names */
bool *force_notnull_flags; /* per-column CSV FNN flags */
bool convert_selectively; /* do selective binary conversion? */
- List *convert_select; /* list of column names (can be NIL) */
+ List *convert_select; /* list of column names (can be NIL) */
bool *convert_select_flags; /* per-column CSV/TEXT CS flags */
/* these are just for error messages, see CopyFromErrorCallback */
*/
StringInfoData line_buf;
bool line_buf_converted; /* converted to server encoding? */
- bool line_buf_valid; /* contains the row being processed? */
+ bool line_buf_valid; /* contains the row being processed? */
/*
* Finally, raw_buf holds raw data read from the data source (file or
ClosePipeToProgram(cstate);
/*
- * If ClosePipeToProgram() didn't throw an error,
- * the program terminated normally, but closed the
- * pipe first. Restore errno, and throw an error.
+ * If ClosePipeToProgram() didn't throw an error, the
+ * program terminated normally, but closed the pipe
+ * first. Restore errno, and throw an error.
*/
errno = EPIPE;
}
bool is_from = stmt->is_from;
bool pipe = (stmt->filename == NULL);
Relation rel;
- Oid relid;
+ Oid relid;
/* Disallow COPY to/from file or program except to superusers. */
if (!pipe && !superuser())
if (stmt->is_program)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to COPY to or from an external program"),
+ errmsg("must be superuser to COPY to or from an external program"),
errhint("Anyone can COPY to stdout or from stdin. "
- "psql's \\copy command also works for anyone.")));
+ "psql's \\copy command also works for anyone.")));
else
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to COPY to or from a file"),
errhint("Anyone can COPY to stdout or from stdin. "
- "psql's \\copy command also works for anyone.")));
+ "psql's \\copy command also works for anyone.")));
}
if (stmt->relation)
else if (strcmp(defel->defname, "convert_selectively") == 0)
{
/*
- * Undocumented, not-accessible-from-SQL option: convert only
- * the named columns to binary form, storing the rest as NULLs.
- * It's allowed for the column list to be NIL.
+ * Undocumented, not-accessible-from-SQL option: convert only the
+ * named columns to binary form, storing the rest as NULLs. It's
+ * allowed for the column list to be NIL.
*/
if (cstate->convert_selectively)
ereport(ERROR,
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
errmsg_internal("selected column \"%s\" not referenced by COPY",
- NameStr(tupDesc->attrs[attnum - 1]->attname))));
+ NameStr(tupDesc->attrs[attnum - 1]->attname))));
cstate->convert_select_flags[attnum - 1] = true;
}
}
static void
ClosePipeToProgram(CopyState cstate)
{
- int pclose_rc;
+ int pclose_rc;
Assert(cstate->is_program);
Node *query,
const char *queryString,
const char *filename,
- bool is_program,
+ bool is_program,
List *attnamelist,
List *options)
{
}
else
{
- mode_t oumask; /* Pre-existing umask value */
+ mode_t oumask; /* Pre-existing umask value */
struct stat st;
/*
if (!is_absolute_path(filename))
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
- errmsg("relative path not allowed for COPY to file")));
+ errmsg("relative path not allowed for COPY to file")));
oumask = umask(S_IWGRP | S_IWOTH);
cstate->copy_file = AllocateFile(cstate->filename, PG_BINARY_W);
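
The umask call above tightens permissions for the duration of the file creation and the saved value is restored afterwards. A self-contained POSIX sketch of that save/override/restore pattern (plain fopen stands in for AllocateFile, and the path is made up):

    #include <stdio.h>
    #include <sys/stat.h>

    int
    main(void)
    {
        mode_t      oumask;
        FILE       *fp;

        /* Disallow group/other write while the file is being created. */
        oumask = umask(S_IWGRP | S_IWOTH);
        fp = fopen("/tmp/copy_demo.out", "wb");
        umask(oumask);              /* restore the pre-existing mask */

        if (fp)
        {
            fputs("demo\n", fp);
            fclose(fp);
        }
        return 0;
    }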
* Error is relevant to a particular line.
*
* If line_buf still contains the correct line, and it's already
- * transcoded, print it. If it's still in a foreign encoding,
- * it's quite likely that the error is precisely a failure to do
+ * transcoded, print it. If it's still in a foreign encoding, it's
+ * quite likely that the error is precisely a failure to do
* encoding conversion (ie, bad data). We dare not try to convert
* it, and at present there's no way to regurgitate it without
* conversion. So we have to punt and just report the line number.
}
/*
- * Optimize if new relfilenode was created in this subxact or
- * one of its committed children and we won't see those rows later
- * as part of an earlier scan or command. This ensures that if this
- * subtransaction aborts then the frozen rows won't be visible
- * after xact cleanup. Note that the stronger test of exactly
- * which subtransaction created it is crucial for correctness
- * of this optimisation.
+ * Optimize if new relfilenode was created in this subxact or one of its
+ * committed children and we won't see those rows later as part of an
+ * earlier scan or command. This ensures that if this subtransaction
+ * aborts then the frozen rows won't be visible after xact cleanup. Note
+ * that the stronger test of exactly which subtransaction created it is
+ * crucial for correctness of this optimisation.
*/
if (cstate->freeze)
{
if (!ThereAreNoPriorRegisteredSnapshots() || !ThereAreNoReadyPortals())
ereport(ERROR,
(ERRCODE_INVALID_TRANSACTION_STATE,
- errmsg("cannot perform FREEZE because of prior transaction activity")));
+ errmsg("cannot perform FREEZE because of prior transaction activity")));
if (cstate->rel->rd_createSubid != GetCurrentSubTransactionId() &&
- cstate->rel->rd_newRelfilenodeSubid != GetCurrentSubTransactionId())
+ cstate->rel->rd_newRelfilenodeSubid != GetCurrentSubTransactionId())
ereport(ERROR,
(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE,
errmsg("cannot perform FREEZE because the table was not created or truncated in the current subtransaction")));
CopyState
BeginCopyFrom(Relation rel,
const char *filename,
- bool is_program,
+ bool is_program,
List *attnamelist,
List *options)
{
int
GetIntoRelEFlags(IntoClause *intoClause)
{
- int flags;
+ int flags;
/*
* We need to tell the executor whether it has to produce OIDs or not,
if (is_matview)
{
/* StoreViewQuery scribbles on tree, so make a copy */
- Query *query = (Query *) copyObject(into->viewQuery);
+ Query *query = (Query *) copyObject(into->viewQuery);
StoreViewQuery(intoRelationId, query, false);
CommandCounterIncrement();
pgdbrel = heap_open(DatabaseRelationId, RowExclusiveLock);
if (!get_db_info(dbname, AccessExclusiveLock, &db_id, NULL, NULL,
- &db_istemplate, NULL, NULL, NULL, NULL, NULL, NULL, NULL))
+ &db_istemplate, NULL, NULL, NULL, NULL, NULL, NULL, NULL))
{
if (!missing_ok)
{
pgdbrel = heap_open(DatabaseRelationId, RowExclusiveLock);
if (!get_db_info(dbname, AccessExclusiveLock, &db_id, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, &src_tblspcoid, NULL, NULL))
+ NULL, NULL, NULL, NULL, NULL, &src_tblspcoid, NULL, NULL))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_DATABASE),
errmsg("database \"%s\" does not exist", dbname)));
AlterDatabase(AlterDatabaseStmt *stmt, bool isTopLevel)
{
Relation rel;
- Oid dboid;
+ Oid dboid;
HeapTuple tuple,
newtuple;
ScanKeyData scankey;
errdetail_busy_db(int notherbackends, int npreparedxacts)
{
if (notherbackends > 0 && npreparedxacts > 0)
- /* We don't deal with singular versus plural here, since gettext
- * doesn't support multiple plurals in one string. */
+
+ /*
+ * We don't deal with singular versus plural here, since gettext
+ * doesn't support multiple plurals in one string.
+ */
errdetail("There are %d other session(s) and %d prepared transaction(s) using the database.",
notherbackends, npreparedxacts);
else if (notherbackends > 0)
notherbackends);
else
errdetail_plural("There is %d prepared transaction using the database.",
- "There are %d prepared transactions using the database.",
+ "There are %d prepared transactions using the database.",
npreparedxacts,
npreparedxacts);
return 0; /* just to keep ereport macro happy */
{
slist_head SQLDropList;
bool in_sql_drop;
- MemoryContext cxt;
+ MemoryContext cxt;
struct EventTriggerQueryState *previous;
} EventTriggerQueryState;
-EventTriggerQueryState *currentEventTriggerState = NULL;
+EventTriggerQueryState *currentEventTriggerState = NULL;
typedef struct
{
- const char *obtypename;
- bool supported;
+ const char *obtypename;
+ bool supported;
} event_trigger_support_data;
typedef enum
} event_trigger_command_tag_check_result;
static event_trigger_support_data event_trigger_support[] = {
- { "AGGREGATE", true },
- { "CAST", true },
- { "CONSTRAINT", true },
- { "COLLATION", true },
- { "CONVERSION", true },
- { "DATABASE", false },
- { "DOMAIN", true },
- { "EXTENSION", true },
- { "EVENT TRIGGER", false },
- { "FOREIGN DATA WRAPPER", true },
- { "FOREIGN TABLE", true },
- { "FUNCTION", true },
- { "INDEX", true },
- { "LANGUAGE", true },
- { "MATERIALIZED VIEW", true },
- { "OPERATOR", true },
- { "OPERATOR CLASS", true },
- { "OPERATOR FAMILY", true },
- { "ROLE", false },
- { "RULE", true },
- { "SCHEMA", true },
- { "SEQUENCE", true },
- { "SERVER", true },
- { "TABLE", true },
- { "TABLESPACE", false},
- { "TRIGGER", true },
- { "TEXT SEARCH CONFIGURATION", true },
- { "TEXT SEARCH DICTIONARY", true },
- { "TEXT SEARCH PARSER", true },
- { "TEXT SEARCH TEMPLATE", true },
- { "TYPE", true },
- { "USER MAPPING", true },
- { "VIEW", true },
- { NULL, false }
+ {"AGGREGATE", true},
+ {"CAST", true},
+ {"CONSTRAINT", true},
+ {"COLLATION", true},
+ {"CONVERSION", true},
+ {"DATABASE", false},
+ {"DOMAIN", true},
+ {"EXTENSION", true},
+ {"EVENT TRIGGER", false},
+ {"FOREIGN DATA WRAPPER", true},
+ {"FOREIGN TABLE", true},
+ {"FUNCTION", true},
+ {"INDEX", true},
+ {"LANGUAGE", true},
+ {"MATERIALIZED VIEW", true},
+ {"OPERATOR", true},
+ {"OPERATOR CLASS", true},
+ {"OPERATOR FAMILY", true},
+ {"ROLE", false},
+ {"RULE", true},
+ {"SCHEMA", true},
+ {"SEQUENCE", true},
+ {"SERVER", true},
+ {"TABLE", true},
+ {"TABLESPACE", false},
+ {"TRIGGER", true},
+ {"TEXT SEARCH CONFIGURATION", true},
+ {"TEXT SEARCH DICTIONARY", true},
+ {"TEXT SEARCH PARSER", true},
+ {"TEXT SEARCH TEMPLATE", true},
+ {"TYPE", true},
+ {"USER MAPPING", true},
+ {"VIEW", true},
+ {NULL, false}
};
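
The array above is a NULL-terminated table of {tag, supported} pairs. A minimal sketch of scanning such a table for a command tag, in the spirit of the check_ddl_tag lookup (a toy table and helper names, not the backend's):

    #include <stdio.h>
    #include <string.h>
    #include <stdbool.h>

    typedef struct
    {
        const char *obtypename;
        bool        supported;
    } demo_support_data;

    static demo_support_data demo_support[] = {
        {"TABLE", true},
        {"DATABASE", false},
        {NULL, false}
    };

    /* Scan the NULL-terminated table for a tag's support entry. */
    static const char *
    demo_check_tag(const char *obtypename)
    {
        for (demo_support_data *etsd = demo_support; etsd->obtypename; etsd++)
        {
            if (strcmp(etsd->obtypename, obtypename) == 0)
                return etsd->supported ? "supported" : "not supported";
        }
        return "unrecognized";
    }

    int
    main(void)
    {
        printf("TABLE: %s\n", demo_check_tag("TABLE"));
        printf("DATABASE: %s\n", demo_check_tag("DATABASE"));
        printf("FOO: %s\n", demo_check_tag("FOO"));
        return 0;
    }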
/* Support for dropped objects */
typedef struct SQLDropObject
{
- ObjectAddress address;
- const char *schemaname;
- const char *objname;
- const char *objidentity;
- const char *objecttype;
- slist_node next;
+ ObjectAddress address;
+ const char *schemaname;
+ const char *objname;
+ const char *objidentity;
+ const char *objecttype;
+ slist_node next;
} SQLDropObject;
static void AlterEventTriggerOwner_internal(Relation rel,
- HeapTuple tup,
- Oid newOwnerId);
+ HeapTuple tup,
+ Oid newOwnerId);
static event_trigger_command_tag_check_result check_ddl_tag(const char *tag);
static void error_duplicate_filter_variable(const char *defname);
static Datum filter_list_to_array(List *filterlist);
static Oid insert_event_trigger_tuple(char *trigname, char *eventname,
- Oid evtOwner, Oid funcoid, List *tags);
+ Oid evtOwner, Oid funcoid, List *tags);
static void validate_ddl_tags(const char *filtervar, List *taglist);
static void EventTriggerInvoke(List *fn_oid_list, EventTriggerData *trigdata);
*/
if (!superuser())
ereport(ERROR,
- (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("permission denied to create event trigger \"%s\"",
- stmt->trigname),
- errhint("Must be superuser to create an event trigger.")));
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ errmsg("permission denied to create event trigger \"%s\"",
+ stmt->trigname),
+ errhint("Must be superuser to create an event trigger.")));
/* Validate event name. */
if (strcmp(stmt->eventname, "ddl_command_start") != 0 &&
strcmp(stmt->eventname, "ddl_command_end") != 0 &&
strcmp(stmt->eventname, "sql_drop") != 0)
ereport(ERROR,
- (errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("unrecognized event name \"%s\"",
- stmt->eventname)));
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("unrecognized event name \"%s\"",
+ stmt->eventname)));
/* Validate filter conditions. */
- foreach (lc, stmt->whenclause)
+ foreach(lc, stmt->whenclause)
{
- DefElem *def = (DefElem *) lfirst(lc);
+ DefElem *def = (DefElem *) lfirst(lc);
if (strcmp(def->defname, "tag") == 0)
{
}
else
ereport(ERROR,
- (errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("unrecognized filter variable \"%s\"", def->defname)));
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("unrecognized filter variable \"%s\"", def->defname)));
}
/* Validate tag list, if any. */
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("event trigger \"%s\" already exists",
- stmt->trigname)));
+ stmt->trigname)));
/* Find and validate the trigger function. */
funcoid = LookupFuncName(stmt->funcname, 0, NULL, false);
{
ListCell *lc;
- foreach (lc, taglist)
+ foreach(lc, taglist)
{
const char *tag = strVal(lfirst(lc));
event_trigger_command_tag_check_result result;
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("filter value \"%s\" not recognized for filter variable \"%s\"",
- tag, filtervar)));
+ tag, filtervar)));
if (result == EVENT_TRIGGER_COMMAND_TAG_NOT_SUPPORTED)
ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /* translator: %s represents an SQL statement name */
- errmsg("event triggers are not supported for %s",
- tag)));
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ /* translator: %s represents an SQL statement name */
+ errmsg("event triggers are not supported for %s",
+ tag)));
}
}
check_ddl_tag(const char *tag)
{
const char *obtypename;
- event_trigger_support_data *etsd;
+ event_trigger_support_data *etsd;
/*
* Handle some idiosyncratic special cases.
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("filter variable \"%s\" specified more than once",
- defname)));
+ defname)));
}
/*
insert_event_trigger_tuple(char *trigname, char *eventname, Oid evtOwner,
Oid funcoid, List *taglist)
{
- Relation tgrel;
- Oid trigoid;
+ Relation tgrel;
+ Oid trigoid;
HeapTuple tuple;
Datum values[Natts_pg_trigger];
bool nulls[Natts_pg_trigger];
- ObjectAddress myself, referenced;
+ ObjectAddress myself,
+ referenced;
/* Open pg_event_trigger. */
tgrel = heap_open(EventTriggerRelationId, RowExclusiveLock);
{
Relation tgrel;
HeapTuple tup;
- Oid trigoid;
+ Oid trigoid;
Form_pg_event_trigger evtForm;
- char tgenabled = stmt->tgenabled;
+ char tgenabled = stmt->tgenabled;
tgrel = heap_open(EventTriggerRelationId, RowExclusiveLock);
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("event trigger \"%s\" does not exist",
- stmt->trigname)));
+ stmt->trigname)));
trigoid = HeapTupleGetOid(tup);
if (!HeapTupleIsValid(tup))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("event trigger with OID %u does not exist", trigOid)));
+ errmsg("event trigger with OID %u does not exist", trigOid)));
AlterEventTriggerOwner_internal(rel, tup, newOwnerId);
if (!superuser_arg(newOwnerId))
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("permission denied to change owner of event trigger \"%s\"",
- NameStr(form->evtname)),
- errhint("The owner of an event trigger must be a superuser.")));
+ errmsg("permission denied to change owner of event trigger \"%s\"",
+ NameStr(form->evtname)),
+ errhint("The owner of an event trigger must be a superuser.")));
form->evtowner = newOwnerId;
simple_heap_update(rel, &tup->t_self, tup);
* tags matching.
*/
static bool
-filter_event_trigger(const char **tag, EventTriggerCacheItem *item)
+filter_event_trigger(const char **tag, EventTriggerCacheItem *item)
{
/*
* Filter by session replication role, knowing that we never see disabled
}
/*
- * Setup for running triggers for the given event. Return value is an OID list
+ * Setup for running triggers for the given event. Return value is an OID list
* of functions to run; if there are any, trigdata is filled with an
* appropriate EventTriggerData for them to receive.
*/
* invoked to match up exactly with the list that CREATE EVENT TRIGGER
* accepts. This debugging cross-check will throw an error if this
* function is invoked for a command tag that CREATE EVENT TRIGGER won't
- * accept. (Unfortunately, there doesn't seem to be any simple, automated
+ * accept. (Unfortunately, there doesn't seem to be any simple, automated
* way to verify that CREATE EVENT TRIGGER doesn't accept extra stuff that
* never reaches this control point.)
*
tag = CreateCommandTag(parsetree);
/*
- * Filter list of event triggers by command tag, and copy them into
- * our memory context. Once we start running the command trigers, or
- * indeed once we do anything at all that touches the catalogs, an
- * invalidation might leave cachelist pointing at garbage, so we must
- * do this before we can do much else.
+ * Filter list of event triggers by command tag, and copy them into our
+ * memory context. Once we start running the command triggers, or indeed
+ * once we do anything at all that touches the catalogs, an invalidation
+ * might leave cachelist pointing at garbage, so we must do this before we
+ * can do much else.
*/
- foreach (lc, cachelist)
+ foreach(lc, cachelist)
{
- EventTriggerCacheItem *item = lfirst(lc);
+ EventTriggerCacheItem *item = lfirst(lc);
if (filter_event_trigger(&tag, item))
{
EventTriggerDDLCommandStart(Node *parsetree)
{
List *runlist;
- EventTriggerData trigdata;
+ EventTriggerData trigdata;
/*
* Event Triggers are completely disabled in standalone mode. There are
return;
runlist = EventTriggerCommonSetup(parsetree,
- EVT_DDLCommandStart, "ddl_command_start",
+ EVT_DDLCommandStart, "ddl_command_start",
&trigdata);
if (runlist == NIL)
return;
list_free(runlist);
/*
- * Make sure anything the event triggers did will be visible to
- * the main command.
+ * Make sure anything the event triggers did will be visible to the main
+ * command.
*/
CommandCounterIncrement();
}
EventTriggerDDLCommandEnd(Node *parsetree)
{
List *runlist;
- EventTriggerData trigdata;
+ EventTriggerData trigdata;
/*
* See EventTriggerDDLCommandStart for a discussion about why event
return;
/*
- * Make sure anything the main command did will be visible to the
- * event triggers.
+ * Make sure anything the main command did will be visible to the event
+ * triggers.
*/
CommandCounterIncrement();
EventTriggerSQLDrop(Node *parsetree)
{
List *runlist;
- EventTriggerData trigdata;
+ EventTriggerData trigdata;
/*
* See EventTriggerDDLCommandStart for a discussion about why event
return;
/*
- * Use current state to determine whether this event fires at all. If there
- * are no triggers for the sql_drop event, then we don't have anything to do
- * here. Note that dropped object collection is disabled if this is the case,
- * so even if we were to try to run, the list would be empty.
+ * Use current state to determine whether this event fires at all. If
+ * there are no triggers for the sql_drop event, then we don't have
+ * anything to do here. Note that dropped object collection is disabled
+ * if this is the case, so even if we were to try to run, the list would
+ * be empty.
*/
if (!currentEventTriggerState ||
slist_is_empty(&currentEventTriggerState->SQLDropList))
runlist = EventTriggerCommonSetup(parsetree,
EVT_SQLDrop, "sql_drop",
&trigdata);
+
/*
- * Nothing to do if run list is empty. Note this shouldn't happen, because
- * if there are no sql_drop events, then objects-to-drop wouldn't have been
- * collected in the first place and we would have quitted above.
+ * Nothing to do if run list is empty. Note this shouldn't happen,
+ * because if there are no sql_drop events, then objects-to-drop wouldn't
+ * have been collected in the first place and we would have quit above.
*/
if (runlist == NIL)
return;
/*
- * Make sure anything the main command did will be visible to the
- * event triggers.
+ * Make sure anything the main command did will be visible to the event
+ * triggers.
*/
CommandCounterIncrement();
/*
- * Make sure pg_event_trigger_dropped_objects only works when running these
- * triggers. Use PG_TRY to ensure in_sql_drop is reset even when one
- * trigger fails. (This is perhaps not necessary, as the currentState
+ * Make sure pg_event_trigger_dropped_objects only works when running
+ * these triggers. Use PG_TRY to ensure in_sql_drop is reset even when
+ * one trigger fails. (This is perhaps not necessary, as the currentState
* variable will be removed shortly by our caller, but it seems better to
* play safe.)
*/
static void
EventTriggerInvoke(List *fn_oid_list, EventTriggerData *trigdata)
{
- MemoryContext context;
- MemoryContext oldcontext;
- ListCell *lc;
- bool first = true;
+ MemoryContext context;
+ MemoryContext oldcontext;
+ ListCell *lc;
+ bool first = true;
/* Guard against stack overflow due to recursive event trigger */
check_stack_depth();
/*
- * Let's evaluate event triggers in their own memory context, so
- * that any leaks get cleaned up promptly.
+ * Let's evaluate event triggers in their own memory context, so that any
+ * leaks get cleaned up promptly.
*/
context = AllocSetContextCreate(CurrentMemoryContext,
"event trigger context",
oldcontext = MemoryContextSwitchTo(context);
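/*
 * A minimal sketch (separate from the function above) of this
 * private-context pattern, assuming only the 9.3-era APIs from
 * utils/memutils.h and nodes/pg_list.h; the per-call work is elided. The
 * point is that deleting the context reclaims anything the calls leaked.
 */
static void
invoke_in_private_context_sketch(List *fn_oid_list)
{
	MemoryContext cxt;
	MemoryContext oldcxt;
	ListCell   *lc;

	cxt = AllocSetContextCreate(CurrentMemoryContext,
								"sketch context",
								ALLOCSET_DEFAULT_MINSIZE,
								ALLOCSET_DEFAULT_INITSIZE,
								ALLOCSET_DEFAULT_MAXSIZE);
	oldcxt = MemoryContextSwitchTo(cxt);
	foreach(lc, fn_oid_list)
	{
		Oid			fnoid = lfirst_oid(lc);

		(void) fnoid;			/* ... invoke it; leaks accumulate in cxt ... */
	}
	MemoryContextSwitchTo(oldcxt);
	MemoryContextDelete(cxt);	/* frees everything at once */
}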
/* Call each event trigger. */
- foreach (lc, fn_oid_list)
+ foreach(lc, fn_oid_list)
{
- Oid fnoid = lfirst_oid(lc);
- FmgrInfo flinfo;
+ Oid fnoid = lfirst_oid(lc);
+ FmgrInfo flinfo;
FunctionCallInfoData fcinfo;
PgStat_FunctionCallUsage fcusage;
/*
- * We want each event trigger to be able to see the results of
- * the previous event trigger's action. Caller is responsible
- * for any command-counter increment that is needed between the
- * event trigger and anything else in the transaction.
+ * We want each event trigger to be able to see the results of the
+ * previous event trigger's action. Caller is responsible for any
+ * command-counter increment that is needed between the event trigger
+ * and anything else in the transaction.
*/
if (first)
first = false;
return true;
case MAX_OCLASS:
+
/*
* This shouldn't ever happen, but we keep the case to avoid a
* compiler warning without a "default" clause in the switch.
EventTriggerBeginCompleteQuery(void)
{
EventTriggerQueryState *state;
- MemoryContext cxt;
+ MemoryContext cxt;
/*
* Currently, sql_drop events are the only reason to have event trigger
* returned false previously.
*
* Note: this might be called in the PG_CATCH block of a failing transaction,
- * so be wary of running anything unnecessary. (In particular, it's probably
+ * so be wary of running anything unnecessary. (In particular, it's probably
* unwise to try to allocate memory.)
*/
void
void
EventTriggerSQLDropAddObject(ObjectAddress *object)
{
- SQLDropObject *obj;
- MemoryContext oldcxt;
+ SQLDropObject *obj;
+ MemoryContext oldcxt;
if (!currentEventTriggerState)
return;
/*
* Obtain schema names from the object's catalog tuple, if one exists;
- * this lets us skip objects in temp schemas. We trust that ObjectProperty
- * contains all object classes that can be schema-qualified.
+ * this lets us skip objects in temp schemas. We trust that
+ * ObjectProperty contains all object classes that can be
+ * schema-qualified.
*/
if (is_objectclass_supported(object->classId))
{
RelationGetDescr(catalog), &isnull);
if (!isnull)
{
- Oid namespaceId;
+ Oid namespaceId;
namespaceId = DatumGetObjectId(datum);
/* Don't report objects in temp namespaces */
Datum
pg_event_trigger_dropped_objects(PG_FUNCTION_ARGS)
{
- ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
- TupleDesc tupdesc;
- Tuplestorestate *tupstore;
- MemoryContext per_query_ctx;
- MemoryContext oldcontext;
- slist_iter iter;
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ TupleDesc tupdesc;
+ Tuplestorestate *tupstore;
+ MemoryContext per_query_ctx;
+ MemoryContext oldcontext;
+ slist_iter iter;
/*
* Protect this function from being called out of context
!currentEventTriggerState->in_sql_drop)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("%s can only be called in a sql_drop event trigger function",
- "pg_event_trigger_dropped_objects()")));
+ errmsg("%s can only be called in a sql_drop event trigger function",
+ "pg_event_trigger_dropped_objects()")));
/* check to see if caller supports us returning a tuplestore */
if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
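/*
 * A condensed, hypothetical sketch of the materialize-mode SRF skeleton a
 * function like this follows, assuming the stock funcapi.h and
 * tuplestore.h interfaces; per-row column filling and the context checks
 * are elided.
 */
static Datum
materialize_srf_sketch(FunctionCallInfo fcinfo)
{
	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
	TupleDesc	tupdesc;
	Tuplestorestate *tupstore;
	MemoryContext oldcontext;

	if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
		elog(ERROR, "return type must be a row type");

	/* Build the tuplestore in the per-query context so it outlives us. */
	oldcontext = MemoryContextSwitchTo(rsinfo->econtext->ecxt_per_query_memory);
	tupstore = tuplestore_begin_heap(true, false, work_mem);
	rsinfo->returnMode = SFRM_Materialize;
	rsinfo->setResult = tupstore;
	rsinfo->setDesc = tupdesc;
	MemoryContextSwitchTo(oldcontext);

	/* ... one tuplestore_putvalues(tupstore, tupdesc, values, nulls) per row ... */

	return (Datum) 0;
}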
instrument_option |= INSTRUMENT_BUFFERS;
/*
- * We always collect timing for the entire statement, even when
- * node-level timing is off, so we don't look at es->timing here.
+ * We always collect timing for the entire statement, even when node-level
+ * timing is off, so we don't look at es->timing here.
*/
INSTR_TIME_SET_CURRENT(starttime);
GetUserId(),
languageOid,
languageValidator,
- prosrc_str, /* converted to text later */
- probin_str, /* converted to text later */
+ prosrc_str, /* converted to text later */
+ probin_str, /* converted to text later */
false, /* not an aggregate */
isWindowFunc,
security,
* (but not VACUUM).
*/
rel = heap_openrv(stmt->relation,
- (stmt->concurrent ? ShareUpdateExclusiveLock : ShareLock));
+ (stmt->concurrent ? ShareUpdateExclusiveLock : ShareLock));
relationId = RelationGetRelid(rel);
namespaceId = RelationGetNamespace(rel);
* Drop the reference snapshot. We must do this before waiting out other
* snapshot holders, else we will deadlock against other processes also
* doing CREATE INDEX CONCURRENTLY, which would see our snapshot as one
- * they must wait for. But first, save the snapshot's xmin to use as
+ * they must wait for. But first, save the snapshot's xmin to use as
* limitXmin for GetCurrentVirtualXIDs().
*/
limitXmin = snapshot->xmin;
static void transientrel_shutdown(DestReceiver *self);
static void transientrel_destroy(DestReceiver *self);
static void refresh_matview_datafill(DestReceiver *dest, Query *query,
- const char *queryString);
+ const char *queryString);
/*
* SetMatViewPopulatedState
*/
void
ExecRefreshMatView(RefreshMatViewStmt *stmt, const char *queryString,
- ParamListInfo params, char *completionTag)
+ ParamListInfo params, char *completionTag)
{
Oid matviewOid;
Relation matviewRel;
* Get a lock until end of transaction.
*/
matviewOid = RangeVarGetRelidExtended(stmt->relation,
- AccessExclusiveLock, false, false,
- RangeVarCallbackOwnsTable, NULL);
+ AccessExclusiveLock, false, false,
+ RangeVarCallbackOwnsTable, NULL);
matviewRel = heap_open(matviewOid, NoLock);
/* Make sure it is a materialized view. */
refresh_matview_datafill(DestReceiver *dest, Query *query,
const char *queryString)
{
- List *rewritten;
+ List *rewritten;
PlannedStmt *plan;
QueryDesc *queryDesc;
transientrel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
{
DR_transientrel *myState = (DR_transientrel *) self;
- Relation transientrel;
+ Relation transientrel;
transientrel = heap_open(myState->transientoid, NoLock);
* Subroutine for ALTER OPERATOR CLASS SET SCHEMA/RENAME
*
* Is there an operator class with the given name and signature already
- * in the given namespace? If so, raise an appropriate error message.
+ * in the given namespace? If so, raise an appropriate error message.
*/
void
IsThereOpClassInNamespace(const char *opcname, Oid opcmethod,
* Subroutine for ALTER OPERATOR FAMILY SET SCHEMA/RENAME
*
* Is there an operator family with the given name and signature already
- * in the given namespace? If so, raise an appropriate error message.
+ * in the given namespace? If so, raise an appropriate error message.
*/
void
IsThereOpFamilyInNamespace(const char *opfname, Oid opfmethod,
* now have OperatorCreate do all the work..
*/
return
- OperatorCreate(oprName, /* operator name */
+ OperatorCreate(oprName, /* operator name */
oprNamespace, /* namespace */
- typeId1, /* left type id */
- typeId2, /* right type id */
- functionOid, /* function for operator */
- commutatorName, /* optional commutator operator name */
- negatorName, /* optional negator operator name */
- restrictionOid, /* optional restrict. sel. procedure */
- joinOid, /* optional join sel. procedure name */
+ typeId1, /* left type id */
+ typeId2, /* right type id */
+ functionOid, /* function for operator */
+ commutatorName, /* optional commutator operator name */
+ negatorName, /* optional negator operator name */
+ restrictionOid, /* optional restrict. sel. procedure */
+ joinOid, /* optional join sel. procedure name */
canMerge, /* operator merges */
canHash); /* operator hashes */
}
} PLTemplate;
static Oid create_proc_lang(const char *languageName, bool replace,
- Oid languageOwner, Oid handlerOid, Oid inlineOid,
- Oid valOid, bool trusted);
+ Oid languageOwner, Oid handlerOid, Oid inlineOid,
+ Oid valOid, bool trusted);
static PLTemplate *find_language_template(const char *languageName);
/* ---------------------------------------------------------------------
/*
* We must mark the buffer dirty before doing XLogInsert(); see notes in
* SyncOneBuffer(). However, we don't apply the desired changes just yet.
- * This looks like a violation of the buffer update protocol, but it is
- * in fact safe because we hold exclusive lock on the buffer. Any other
+ * This looks like a violation of the buffer update protocol, but it is in
+ * fact safe because we hold exclusive lock on the buffer. Any other
* process, including a checkpoint, that tries to examine the buffer
* contents will block until we release the lock, and then will see the
* final state that we install below.
}
/*
- * We must reset log_cnt when isInit or when changing any parameters
- * that would affect future nextval allocations.
+ * We must reset log_cnt when isInit or when changing any parameters that
+ * would affect future nextval allocations.
*/
if (isInit)
new->log_cnt = 0;
int16 seqNumber, Relation inhRelation);
static int findAttrByName(const char *attributeName, List *schema);
static void AlterIndexNamespaces(Relation classRel, Relation rel,
- Oid oldNspOid, Oid newNspOid, ObjectAddresses *objsMoved);
+ Oid oldNspOid, Oid newNspOid, ObjectAddresses *objsMoved);
static void AlterSeqNamespaces(Relation classRel, Relation rel,
Oid oldNspOid, Oid newNspOid, ObjectAddresses *objsMoved,
LOCKMODE lockmode);
{
Oid heap_relid;
Oid toast_relid;
- MultiXactId minmulti;
+ MultiXactId minmulti;
/*
* This effectively deletes all rows in the table, and may be done
&found_whole_row);
/*
- * For the moment we have to reject whole-row variables.
- * We could convert them, if we knew the new table's rowtype
- * OID, but that hasn't been assigned yet.
+ * For the moment we have to reject whole-row variables. We
+ * could convert them, if we knew the new table's rowtype OID,
+ * but that hasn't been assigned yet.
*/
if (found_whole_row)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot convert whole-row table reference"),
+ errmsg("cannot convert whole-row table reference"),
errdetail("Constraint \"%s\" contains a whole-row reference to table \"%s\".",
name,
RelationGetRelationName(relation))));
Relation targetrelation;
Relation attrelation;
HeapTuple atttup;
- Form_pg_attribute attform;
+ Form_pg_attribute attform;
int attnum;
/*
rename_constraint_internal(relid, typid,
stmt->subname,
stmt->newname,
- stmt->relation ? interpretInhOption(stmt->relation->inhOpt) : false, /* recursive? */
- false, /* recursing? */
+ stmt->relation ? interpretInhOption(stmt->relation->inhOpt) : false, /* recursive? */
+ false, /* recursing? */
0 /* expected inhcount */ );
}
case AT_ColumnDefault:
case AT_ProcessedConstraint: /* becomes AT_AddConstraint */
case AT_AddConstraintRecurse: /* becomes AT_AddConstraint */
- case AT_ReAddConstraint: /* becomes AT_AddConstraint */
+ case AT_ReAddConstraint: /* becomes AT_AddConstraint */
case AT_EnableTrig:
case AT_EnableAlwaysTrig:
case AT_EnableReplicaTrig:
ATExecAddConstraint(wqueue, tab, rel, (Constraint *) cmd->def,
true, false, lockmode);
break;
- case AT_ReAddConstraint: /* Re-add pre-existing check constraint */
+ case AT_ReAddConstraint: /* Re-add pre-existing check
+ * constraint */
ATExecAddConstraint(wqueue, tab, rel, (Constraint *) cmd->def,
false, true, lockmode);
break;
ereport(ERROR,
(errcode(ERRCODE_NOT_NULL_VIOLATION),
errmsg("column \"%s\" contains null values",
- NameStr(newTupDesc->attrs[attn]->attname)),
+ NameStr(newTupDesc->attrs[attn]->attname)),
errtablecol(oldrel, attn + 1)));
}
stmt->deferrable,
stmt->initdeferred,
stmt->primary,
- true, /* update pg_index */
- true, /* remove old dependencies */
+ true, /* update pg_index */
+ true, /* remove old dependencies */
allowSystemTableMods,
- false); /* is_internal */
+ false); /* is_internal */
index_close(indexRel, NoLock);
}
!parent_rel->rd_islocaltemp)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("cannot inherit from temporary relation of another session")));
+ errmsg("cannot inherit from temporary relation of another session")));
/* Ditto for the child */
if (child_rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP &&
!child_rel->rd_islocaltemp)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("cannot inherit to temporary relation of another session")));
+ errmsg("cannot inherit to temporary relation of another session")));
/*
* Check for duplicates in the list of parents, and determine the highest
RelationGetRelid(parent_rel));
/*
- * Post alter hook of this inherits. Since object_access_hook doesn't
- * take multiple object identifiers, we relay oid of parent relation
- * using auxiliary_id argument.
+ * Post-alter hook for this inheritance. Since object_access_hook doesn't
+ * take multiple object identifiers, we relay the OID of the parent
+ * relation using the auxiliary_id argument.
*/
InvokeObjectPostAlterHookArg(InheritsRelationId,
RelationGetRelid(rel), 0,
void
AlterRelationNamespaceInternal(Relation classRel, Oid relOid,
Oid oldNspOid, Oid newNspOid,
- bool hasDependEntry, ObjectAddresses *objsMoved)
+ bool hasDependEntry, ObjectAddresses *objsMoved)
{
HeapTuple classTup;
Form_pg_class classForm;
- ObjectAddress thisobj;
+ ObjectAddress thisobj;
classTup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relOid));
if (!HeapTupleIsValid(classTup))
/* Update dependency on schema if caller said so */
if (hasDependEntry &&
changeDependencyFor(RelationRelationId, relOid,
- NamespaceRelationId, oldNspOid, newNspOid) != 1)
+ NamespaceRelationId, oldNspOid, newNspOid) != 1)
elog(ERROR, "failed to change schema dependency for relation \"%s\"",
NameStr(classForm->relname));
/* Do nothing (there shouldn't be such entries, actually) */
break;
case ONCOMMIT_DELETE_ROWS:
+
/*
* If this transaction hasn't accessed any temporary
* relations, we can skip truncating ON COMMIT DELETE ROWS
* This is intended as a callback for RangeVarGetRelidExtended(). It allows
* the relation to be locked only if (1) it's a plain table, materialized
* view, or TOAST table and (2) the current user is the owner (or the
- * superuser). This meets the permission-checking needs of CLUSTER, REINDEX
+ * superuser). This meets the permission-checking needs of CLUSTER, REINDEX
* TABLE, and REFRESH MATERIALIZED VIEW; we expose it here so that it can be
* used by all.
*/
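/*
 * A sketch of a caller-supplied callback of the same shape, assuming the
 * 9.3-era RangeVarGetRelidCallback signature from namespace.h; the
 * ownership test shown is illustrative, not the shared callback documented
 * above.
 */
static void
sketch_callback_owns_relation(const RangeVar *relation, Oid relId,
							  Oid oldRelId, void *arg)
{
	if (OidIsValid(relId) && !pg_class_ownercheck(relId, GetUserId()))
		aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS,
					   relation->relname);
}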
true, /* islocal */
0, /* inhcount */
true, /* isnoinherit */
- isInternal); /* is_internal */
+ isInternal); /* is_internal */
}
/*
if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
{
tgoid = HeapTupleGetOid(tuple);
+
/*
* Update pg_trigger tuple with new tgname.
*/
if (trigdesc && trigdesc->trig_delete_after_row)
{
HeapTuple trigtuple = GetTupleForTrigger(estate, NULL, relinfo,
- tupleid, LockTupleExclusive,
+ tupleid, LockTupleExclusive,
NULL);
AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_DELETE,
if (trigdesc && trigdesc->trig_update_after_row)
{
HeapTuple trigtuple = GetTupleForTrigger(estate, NULL, relinfo,
- tupleid, LockTupleExclusive,
+ tupleid, LockTupleExclusive,
NULL);
AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_UPDATE,
tuple.t_self = *tid;
test = heap_lock_tuple(relation, &tuple,
estate->es_output_cid,
- lockmode, false /* wait */,
+ lockmode, false /* wait */ ,
false, &buffer, &hufd);
switch (test)
{
case HeapTupleSelfUpdated:
+
/*
* The target tuple was already updated or deleted by the
* current command, or by a later command in the current
array_type, /* type name */
typeNamespace, /* namespace */
InvalidOid, /* relation oid (n/a here) */
- 0, /* relation kind (ditto) */
- GetUserId(), /* owner's ID */
- -1, /* internal size (always varlena) */
+ 0, /* relation kind (ditto) */
+ GetUserId(), /* owner's ID */
+ -1, /* internal size (always varlena) */
TYPTYPE_BASE, /* type-type (base type) */
TYPCATEGORY_ARRAY, /* type-category (array) */
- false, /* array types are never preferred */
+ false, /* array types are never preferred */
delimiter, /* array element delimiter */
F_ARRAY_IN, /* input procedure */
- F_ARRAY_OUT, /* output procedure */
+ F_ARRAY_OUT, /* output procedure */
F_ARRAY_RECV, /* receive procedure */
F_ARRAY_SEND, /* send procedure */
- typmodinOid, /* typmodin procedure */
+ typmodinOid, /* typmodin procedure */
typmodoutOid, /* typmodout procedure */
F_ARRAY_TYPANALYZE, /* analyze procedure */
- typoid, /* element type ID */
- true, /* yes this is an array type */
+ typoid, /* element type ID */
+ true, /* yes this is an array type */
InvalidOid, /* no further array type */
InvalidOid, /* base type ID */
- NULL, /* never a default type value */
- NULL, /* binary default isn't sent either */
- false, /* never passed by value */
+ NULL, /* never a default type value */
+ NULL, /* binary default isn't sent either */
+ false, /* never passed by value */
alignment, /* see above */
- 'x', /* ARRAY is always toastable */
- -1, /* typMod (Domains only) */
- 0, /* Array dimensions of typbasetype */
- false, /* Type NOT NULL */
+ 'x', /* ARRAY is always toastable */
+ -1, /* typMod (Domains only) */
+ 0, /* Array dimensions of typbasetype */
+ false, /* Type NOT NULL */
collation); /* type's collation */
pfree(array_type);
/*
* Check constraints are handled after domain creation, as
* they require the Oid of the domain; at this point we can
- * only check that they're not marked NO INHERIT, because
- * that would be bogus.
+ * only check that they're not marked NO INHERIT, because that
+ * would be bogus.
*/
if (constr->is_no_inherit)
ereport(ERROR,
/*
* Ordinarily we disallow adding values within transaction blocks, because
* we can't cope with enum OID values getting into indexes and then having
- * their defining pg_enum entries go away. However, it's okay if the enum
- * type was created in the current transaction, since then there can be
- * no such indexes that wouldn't themselves go away on rollback. (We
- * support this case because pg_dump --binary-upgrade needs it.) We test
- * this by seeing if the pg_type row has xmin == current XID and is not
- * HEAP_UPDATED. If it is HEAP_UPDATED, we can't be sure whether the
- * type was created or only modified in this xact. So we are disallowing
- * some cases that could theoretically be safe; but fortunately pg_dump
- * only needs the simplest case.
+ * their defining pg_enum entries go away. However, it's okay if the enum
+ * type was created in the current transaction, since then there can be no
+ * such indexes that wouldn't themselves go away on rollback. (We support
+ * this case because pg_dump --binary-upgrade needs it.) We test this by
+ * seeing if the pg_type row has xmin == current XID and is not
+ * HEAP_UPDATED. If it is HEAP_UPDATED, we can't be sure whether the type
+ * was created or only modified in this xact. So we are disallowing some
+ * cases that could theoretically be safe; but fortunately pg_dump only
+ * needs the simplest case.
*/
if (HeapTupleHeaderGetXmin(tup->t_data) == GetCurrentTransactionId() &&
!(tup->t_data->t_infomask & HEAP_UPDATED))
- /* safe to do inside transaction block */ ;
+ /* safe to do inside transaction block */ ;
else
PreventTransactionChain(isTopLevel, "ALTER TYPE ... ADD");
/*
* In principle the auxiliary information for this
* error should be errdatatype(), but errtablecol()
- * seems considerably more useful in practice. Since
+ * seems considerably more useful in practice. Since
* this code only executes in an ALTER DOMAIN command,
* the client should already know which domain is in
* question.
/*
* In principle the auxiliary information for this error
* should be errdomainconstraint(), but errtablecol()
- * seems considerably more useful in practice. Since this
+ * seems considerably more useful in practice. Since this
* code only executes in an ALTER DOMAIN command, the
* client should already know which domain is in question,
* and which constraint too.
true, /* is local */
0, /* inhcount */
false, /* connoinherit */
- false); /* is_internal */
+ false); /* is_internal */
/*
* Return the compiled constraint expression so the calling routine can
* hasDependEntry should be TRUE if type is expected to have a pg_shdepend
* entry (ie, it's not a table rowtype nor an array type).
* is_primary_ops should be TRUE if this function is invoked with user's
- * direct operation (e.g, shdepReassignOwned). Elsewhere,
+ * direct operation (e.g., shdepReassignOwned). Elsewhere,
*/
void
AlterTypeOwnerInternal(Oid typeOid, Oid newOwnerId,
TypeName *typename;
Oid typeOid;
Oid nspOid;
- ObjectAddresses *objsMoved;
+ ObjectAddresses *objsMoved;
/* Make a TypeName so we can use standard type lookup machinery */
typename = makeTypeNameFromNameList(names);
{
HeapTuple roletuple;
Oid databaseid = InvalidOid;
- Oid roleid = InvalidOid;
+ Oid roleid = InvalidOid;
if (stmt->role)
{
* Since we don't take a lock here, the relation might be gone, or the
* RangeVar might no longer refer to the OID we look up here. In the
* former case, VACUUM will do nothing; in the latter case, it will
- * process the OID we looked up here, rather than the new one.
- * Neither is ideal, but there's little practical alternative, since
- * we're going to commit this transaction and begin a new one between
- * now and then.
+ * process the OID we looked up here, rather than the new one. Neither
+ * is ideal, but there's little practical alternative, since we're
+ * going to commit this transaction and begin a new one between now
+ * and then.
*/
relid = RangeVarGetRelid(vacrel, NoLock, false);
if (multiXactFrzLimit != NULL)
{
- MultiXactId mxLimit;
+ MultiXactId mxLimit;
/*
* simplistic multixactid freezing: use the same freezing policy as
SysScanDesc scan;
HeapTuple classTup;
TransactionId newFrozenXid;
- MultiXactId newFrozenMulti;
+ MultiXactId newFrozenMulti;
bool dirty = false;
/*
newFrozenXid = GetOldestXmin(true, true);
/*
- * Similarly, initialize the MultiXact "min" with the value that would
- * be used on pg_class for new tables. See AddNewRelationTuple().
+ * Similarly, initialize the MultiXact "min" with the value that would be
+ * used on pg_class for new tables. See AddNewRelationTuple().
*/
newFrozenMulti = GetOldestMultiXactId();
/*
* Update the wrap limit for GetNewTransactionId and creation of new
- * MultiXactIds. Note: these functions will also signal the postmaster for
- * an(other) autovac cycle if needed. XXX should we avoid possibly
+ * MultiXactIds. Note: these functions will also signal the postmaster
+ * for an(other) autovac cycle if needed. XXX should we avoid possibly
* signalling twice?
*/
SetTransactionIdLimit(frozenXID, oldestxid_datoid);
* that the potential for improvement was great enough to merit the cost of
* supporting them.
*/
-#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL 20 /* ms */
-#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL 50 /* ms */
-#define VACUUM_TRUNCATE_LOCK_TIMEOUT 5000 /* ms */
+#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL 20 /* ms */
+#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL 50 /* ms */
+#define VACUUM_TRUNCATE_LOCK_TIMEOUT 5000 /* ms */
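/*
 * An illustrative sketch, with hypothetical helpers, of how constants like
 * these typically bound a polling loop: try, sleep a short interval, and
 * give up once the total wait reaches the timeout. attempt_lock() and
 * sleep_ms() stand in for the real lock-manager and latch calls.
 */
extern bool attempt_lock(void);	/* hypothetical conditional acquire */
extern void sleep_ms(int ms);	/* hypothetical millisecond sleep */

static bool
poll_for_lock_sketch(void)
{
	int			waited = 0;		/* total ms spent waiting so far */

	while (!attempt_lock())
	{
		if (waited >= VACUUM_TRUNCATE_LOCK_TIMEOUT)
			return false;		/* give up after ~5 seconds */
		sleep_ms(VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL);
		waited += VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL;
	}
	return true;
}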
/*
* Guesstimation of number of dead tuples per page. This is used to
double new_rel_tuples;
BlockNumber new_rel_allvisible;
TransactionId new_frozen_xid;
- MultiXactId new_min_multi;
+ MultiXactId new_min_multi;
/* measure elapsed time iff autovacuum logging requires it */
if (IsAutoVacuumWorkerProcess() && Log_autovacuum_min_duration >= 0)
/* report results to the stats collector, too */
pgstat_report_vacuum(RelationGetRelid(onerel),
- onerel->rd_rel->relisshared,
- new_rel_tuples);
+ onerel->rd_rel->relisshared,
+ new_rel_tuples);
/* and log the action if appropriate */
if (IsAutoVacuumWorkerProcess() && Log_autovacuum_min_duration >= 0)
"pages: %d removed, %d remain\n"
"tuples: %.0f removed, %.0f remain\n"
"buffer usage: %d hits, %d misses, %d dirtied\n"
- "avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"
+ "avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"
"system usage: %s",
get_database_name(MyDatabaseId),
get_namespace_name(RelationGetNamespace(onerel)),
/*
* It should never be the case that the visibility map page is set
* while the page-level bit is clear, but the reverse is allowed
- * (if checksums are not enabled). Regardless, set the both bits
+ * (if checksums are not enabled). Regardless, set both bits
* so that we get back in sync.
*
* NB: If the heap page is all-visible but the VM bit is not set,
- * we don't need to dirty the heap page. However, if checksums are
- * enabled, we do need to make sure that the heap page is dirtied
- * before passing it to visibilitymap_set(), because it may be
- * logged. Given that this situation should only happen in rare
- * cases after a crash, it is not worth optimizing.
+ * we don't need to dirty the heap page. However, if checksums
+ * are enabled, we do need to make sure that the heap page is
+ * dirtied before passing it to visibilitymap_set(), because it
+ * may be logged. Given that this situation should only happen in
+ * rare cases after a crash, it is not worth optimizing.
*/
PageSetAllVisible(page);
MarkBufferDirty(buf);
Page page = BufferGetPage(buffer);
OffsetNumber unused[MaxOffsetNumber];
int uncnt = 0;
- TransactionId visibility_cutoff_xid;
+ TransactionId visibility_cutoff_xid;
START_CRIT_SECTION();
MarkBufferDirty(buffer);
/*
- * Now that we have removed the dead tuples from the page, once again check
- * if the page has become all-visible.
+ * Now that we have removed the dead tuples from the page, once again
+ * check if the page has become all-visible.
*/
if (!visibilitymap_test(onerel, blkno, vmbuffer) &&
heap_page_is_all_visible(buffer, &visibility_cutoff_xid))
Assert(BufferIsValid(*vmbuffer));
PageSetAllVisible(page);
visibilitymap_set(onerel, blkno, buffer, InvalidXLogRecPtr, *vmbuffer,
- visibility_cutoff_xid);
+ visibility_cutoff_xid);
}
/* XLOG stuff */
static bool
heap_page_is_all_visible(Buffer buf, TransactionId *visibility_cutoff_xid)
{
- Page page = BufferGetPage(buf);
+ Page page = BufferGetPage(buf);
OffsetNumber offnum,
- maxoff;
- bool all_visible = true;
+ maxoff;
+ bool all_visible = true;
*visibility_cutoff_xid = InvalidTransactionId;
/*
* This is a stripped down version of the line pointer scan in
- * lazy_scan_heap(). So if you change anything here, also check that
- * code.
+ * lazy_scan_heap(). So if you change anything here, also check that code.
*/
maxoff = PageGetMaxOffsetNumber(page);
for (offnum = FirstOffsetNumber;
- offnum <= maxoff && all_visible;
- offnum = OffsetNumberNext(offnum))
+ offnum <= maxoff && all_visible;
+ offnum = OffsetNumberNext(offnum))
{
- ItemId itemid;
- HeapTupleData tuple;
+ ItemId itemid;
+ HeapTupleData tuple;
itemid = PageGetItemId(page, offnum);
ItemPointerSet(&(tuple.t_self), BufferGetBlockNumber(buf), offnum);
/*
- * Dead line pointers can have index pointers pointing to them. So they
- * can't be treated as visible
+ * Dead line pointers can have index pointers pointing to them. So
+ * they can't be treated as visible
*/
if (ItemIdIsDead(itemid))
{
}
/*
- * The inserter definitely committed. But is it old
- * enough that everyone sees it as committed?
+ * The inserter definitely committed. But is it old enough
+ * that everyone sees it as committed?
*/
xmin = HeapTupleHeaderGetXmin(tuple.t_data);
if (!TransactionIdPrecedes(xmin, OldestXmin))
elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
break;
}
- } /* scan along page */
+ } /* scan along page */
return all_visible;
}
RelationGetRelationName(resultRel))));
break;
case RELKIND_VIEW:
+
/*
* Okay only if there's a suitable INSTEAD OF trigger. Messages
* here should match rewriteHandler.c's rewriteTargetView, except
* that we omit errdetail because we haven't got the information
- * handy (and given that we really shouldn't get here anyway,
- * it's not worth great exertion to get).
+ * handy (and given that we really shouldn't get here anyway, it's
+ * not worth great exertion to get).
*/
switch (operation)
{
if (fdwroutine->ExecForeignInsert == NULL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot insert into foreign table \"%s\"",
- RelationGetRelationName(resultRel))));
+ errmsg("cannot insert into foreign table \"%s\"",
+ RelationGetRelationName(resultRel))));
break;
case CMD_UPDATE:
if (fdwroutine->ExecForeignUpdate == NULL)
if (fdwroutine->ExecForeignDelete == NULL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot delete from foreign table \"%s\"",
- RelationGetRelationName(resultRel))));
+ errmsg("cannot delete from foreign table \"%s\"",
+ RelationGetRelationName(resultRel))));
break;
default:
elog(ERROR, "unrecognized CmdType: %d", (int) operation);
}
/*
- * close any relations selected FOR [KEY] UPDATE/SHARE, again keeping locks
+ * close any relations selected FOR [KEY] UPDATE/SHARE, again keeping
+ * locks
*/
foreach(l, estate->es_rowMarks)
{
qual = resultRelInfo->ri_ConstraintExprs[i];
/*
- * NOTE: SQL specifies that a NULL result from a constraint
- * expression is not to be treated as a failure. Therefore, tell
- * ExecQual to return TRUE for NULL.
+ * NOTE: SQL specifies that a NULL result from a constraint expression
+ * is not to be treated as a failure. Therefore, tell ExecQual to
+ * return TRUE for NULL.
*/
if (!ExecQual(qual, econtext, true))
return check[i].ccname;
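/*
 * A toy sketch (hypothetical names) of the three-valued rule cited above:
 * a CHECK expression fails only on definite FALSE, so NULL counts as a
 * pass, which is exactly why ExecQual is told to return TRUE for NULL.
 */
typedef enum SketchTvl
{
	SKETCH_FALSE,
	SKETCH_TRUE,
	SKETCH_NULL
} SketchTvl;

static bool
check_constraint_passes_sketch(SketchTvl result)
{
	return (result == SKETCH_TRUE || result == SKETCH_NULL);
}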
/*
* If tuple was inserted by our own transaction, we have to check
* cmin against es_output_cid: cmin >= current CID means our
- * command cannot see the tuple, so we should ignore it.
- * Otherwise heap_lock_tuple() will throw an error, and so would
- * any later attempt to update or delete the tuple. (We need not
- * check cmax because HeapTupleSatisfiesDirty will consider a
- * tuple deleted by our transaction dead, regardless of cmax.)
- * Wee just checked that priorXmax == xmin, so we can test that
- * variable instead of doing HeapTupleHeaderGetXmin again.
+ * command cannot see the tuple, so we should ignore it. Otherwise
+ * heap_lock_tuple() will throw an error, and so would any later
+ * attempt to update or delete the tuple. (We need not check cmax
+ * because HeapTupleSatisfiesDirty will consider a tuple deleted
+ * by our transaction dead, regardless of cmax.) We just checked
+ * that priorXmax == xmin, so we can test that variable instead of
+ * doing HeapTupleHeaderGetXmin again.
*/
if (TransactionIdIsCurrentTransactionId(priorXmax) &&
HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
*/
test = heap_lock_tuple(relation, &tuple,
estate->es_output_cid,
- lockmode, false /* wait */,
+ lockmode, false /* wait */ ,
false, &buffer, &hufd);
/* We now have two pins on the buffer, get rid of one */
ReleaseBuffer(buffer);
switch (test)
{
case HeapTupleSelfUpdated:
+
/*
* The target tuple was already updated or deleted by the
* current command, or by a later command in the current
{
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("WHERE CURRENT OF is not supported for this table type")));
+ errmsg("WHERE CURRENT OF is not supported for this table type")));
return 0; /* keep compiler quiet */
}
rettype,
-1,
get_typcollation(rettype),
- COERCE_IMPLICIT_CAST);
+ COERCE_IMPLICIT_CAST);
/* Relabel is dangerous if sort/group or setop column */
if (tle->ressortgroupref != 0 || parse->setOperations)
*modifyTargetList = true;
atttype,
-1,
get_typcollation(atttype),
- COERCE_IMPLICIT_CAST);
+ COERCE_IMPLICIT_CAST);
/* Relabel is dangerous if sort/group or setop column */
if (tle->ressortgroupref != 0 || parse->setOperations)
*modifyTargetList = true;
break;
default:
elog(ERROR, "unsupported rowmark type");
- lockmode = LockTupleNoKeyExclusive; /* keep compiler quiet */
+ lockmode = LockTupleNoKeyExclusive; /* keep compiler quiet */
break;
}
switch (test)
{
case HeapTupleSelfUpdated:
+
/*
* The target tuple was already updated or deleted by the
* current command, or by a later command in the current
result = heap_delete(resultRelationDesc, tupleid,
estate->es_output_cid,
estate->es_crosscheck_snapshot,
- true /* wait for commit */,
+ true /* wait for commit */ ,
&hufd);
switch (result)
{
case HeapTupleSelfUpdated:
+
/*
* The target tuple was already updated or deleted by the
* current command, or by a later command in the current
* transaction. The former case is possible in a join DELETE
- * where multiple tuples join to the same target tuple.
- * This is somewhat questionable, but Postgres has always
- * allowed it: we just ignore additional deletion attempts.
+ * where multiple tuples join to the same target tuple. This
+ * is somewhat questionable, but Postgres has always allowed
+ * it: we just ignore additional deletion attempts.
*
* The latter case arises if the tuple is modified by a
* command in a BEFORE trigger, or perhaps by a command in a
* proceed. We don't want to discard the original DELETE
* while keeping the triggered actions based on its deletion;
* and it would be no better to allow the original DELETE
- * while discarding updates that it triggered. The row update
+ * while discarding updates that it triggered. The row update
* carries some information that might be important according
* to business rules; so throwing an error is the only safe
* course.
*
- * If a trigger actually intends this type of interaction,
- * it can re-execute the DELETE and then return NULL to
- * cancel the outer delete.
+ * If a trigger actually intends this type of interaction, it
+ * can re-execute the DELETE and then return NULL to cancel
+ * the outer delete.
*/
if (hufd.cmax != estate->es_output_cid)
ereport(ERROR,
}
else
{
- LockTupleMode lockmode;
+ LockTupleMode lockmode;
/*
* Check the constraints of the tuple
result = heap_update(resultRelationDesc, tupleid, tuple,
estate->es_output_cid,
estate->es_crosscheck_snapshot,
- true /* wait for commit */,
+ true /* wait for commit */ ,
&hufd, &lockmode);
switch (result)
{
case HeapTupleSelfUpdated:
+
/*
* The target tuple was already updated or deleted by the
* current command, or by a later command in the current
* transaction. The former case is possible in a join UPDATE
- * where multiple tuples join to the same target tuple.
- * This is pretty questionable, but Postgres has always
- * allowed it: we just execute the first update action and
- * ignore additional update attempts.
+ * where multiple tuples join to the same target tuple. This
+ * is pretty questionable, but Postgres has always allowed it:
+ * we just execute the first update action and ignore
+ * additional update attempts.
*
* The latter case arises if the tuple is modified by a
* command in a BEFORE trigger, or perhaps by a command in a
* previous ones. So throwing an error is the only safe
* course.
*
- * If a trigger actually intends this type of interaction,
- * it can re-execute the UPDATE (assuming it can figure out
- * how) and then return NULL to cancel the outer update.
+ * If a trigger actually intends this type of interaction, it
+ * can re-execute the UPDATE (assuming it can figure out how)
+ * and then return NULL to cancel the outer update.
*/
if (hufd.cmax != estate->es_output_cid)
ereport(ERROR,
* open that relation and acquire appropriate lock on it.
*/
currentRelation = ExecOpenScanRelation(estate,
- ((SeqScan *) node->ps.plan)->scanrelid,
+ ((SeqScan *) node->ps.plan)->scanrelid,
eflags);
/* initialize a heapscan */
* CachedPlanSources.
*
* This is exported so that pl/pgsql can use it (this beats letting pl/pgsql
- * look directly into the SPIPlan for itself). It's not documented in
+ * look directly into the SPIPlan for itself). It's not documented in
* spi.sgml because we'd just as soon not have too many places using this.
*/
List *
* return NULL. Caller is responsible for doing ReleaseCachedPlan().
*
* This is exported so that pl/pgsql can use it (this beats letting pl/pgsql
- * look directly into the SPIPlan for itself). It's not documented in
+ * look directly into the SPIPlan for itself). It's not documented in
* spi.sgml because we'd just as soon not have too many places using this.
*/
CachedPlan *
stmt_list = pg_analyze_and_rewrite_params(parsetree,
src,
plan->parserSetup,
- plan->parserSetupArg);
+ plan->parserSetupArg);
}
else
{
plan->parserSetup,
plan->parserSetupArg,
plan->cursor_options,
- false); /* not fixed result */
+ false); /* not fixed result */
}
/*
int sz;
binaryheap *heap;
- sz = offsetof(binaryheap, bh_nodes) + sizeof(Datum) * capacity;
+ sz = offsetof(binaryheap, bh_nodes) +sizeof(Datum) * capacity;
heap = palloc(sz);
heap->bh_size = 0;
heap->bh_space = capacity;
static inline void
swap_nodes(binaryheap *heap, int a, int b)
{
- Datum swap;
+ Datum swap;
swap = heap->bh_nodes[a];
heap->bh_nodes[a] = heap->bh_nodes[b];
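/*
 * A self-contained sketch of the sift-up step such a heap performs after
 * appending an element, assuming the conventional 0-based array layout
 * (parent of i is (i - 1) / 2) and a max-heap comparator; the names here
 * are hypothetical, not binaryheap's API.
 */
static void
sift_up_sketch(Datum *nodes, int pos,
			   int (*cmp) (Datum a, Datum b))
{
	while (pos > 0)
	{
		int			parent = (pos - 1) / 2;
		Datum		tmp;

		if (cmp(nodes[pos], nodes[parent]) <= 0)
			break;				/* heap property already holds */
		tmp = nodes[parent];	/* same exchange swap_nodes() does */
		nodes[parent] = nodes[pos];
		nodes[pos] = tmp;
		pos = parent;
	}
}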
return ret;
retval = krb5_recvauth(pg_krb5_context, &auth_context,
- (krb5_pointer) &port->sock, pg_krb_srvnam,
+ (krb5_pointer) & port->sock, pg_krb_srvnam,
pg_krb5_server, 0, pg_krb5_keytab, &ticket);
if (retval)
{
{
ldap_unbind(*ldap);
ereport(LOG,
- (errmsg("could not set LDAP protocol version: %s", ldap_err2string(r))));
+ (errmsg("could not set LDAP protocol version: %s", ldap_err2string(r))));
return STATUS_ERROR;
}
{
ldap_unbind(*ldap);
ereport(LOG,
- (errmsg("could not start LDAP TLS session: %s", ldap_err2string(r))));
+ (errmsg("could not start LDAP TLS session: %s", ldap_err2string(r))));
return STATUS_ERROR;
}
}
{
ereport(LOG,
(errmsg("could not perform initial LDAP bind for ldapbinddn \"%s\" on server \"%s\": %s",
- port->hba->ldapbinddn, port->hba->ldapserver, ldap_err2string(r))));
+ port->hba->ldapbinddn, port->hba->ldapserver, ldap_err2string(r))));
return STATUS_ERROR;
}
{
ereport(LOG,
(errmsg("could not search LDAP for filter \"%s\" on server \"%s\": %s",
- filter, port->hba->ldapserver, ldap_err2string(r))));
+ filter, port->hba->ldapserver, ldap_err2string(r))));
pfree(filter);
return STATUS_ERROR;
}
{
if (count == 0)
ereport(LOG,
- (errmsg("LDAP user \"%s\" does not exist", port->user_name),
- errdetail("LDAP search for filter \"%s\" on server \"%s\" returned no entries.",
- filter, port->hba->ldapserver)));
+ (errmsg("LDAP user \"%s\" does not exist", port->user_name),
+ errdetail("LDAP search for filter \"%s\" on server \"%s\" returned no entries.",
+ filter, port->hba->ldapserver)));
else
ereport(LOG,
- (errmsg("LDAP user \"%s\" is not unique", port->user_name),
- errdetail_plural("LDAP search for filter \"%s\" on server \"%s\" returned %d entry.",
- "LDAP search for filter \"%s\" on server \"%s\" returned %d entries.",
- count,
- filter, port->hba->ldapserver, count)));
+ (errmsg("LDAP user \"%s\" is not unique", port->user_name),
+ errdetail_plural("LDAP search for filter \"%s\" on server \"%s\" returned %d entry.",
+ "LDAP search for filter \"%s\" on server \"%s\" returned %d entries.",
+ count,
+ filter, port->hba->ldapserver, count)));
pfree(filter);
ldap_msgfree(search_message);
if (r != LDAP_SUCCESS)
{
ereport(LOG,
- (errmsg("LDAP login failed for user \"%s\" on server \"%s\": %s",
- fulluser, port->hba->ldapserver, ldap_err2string(r))));
+ (errmsg("LDAP login failed for user \"%s\" on server \"%s\": %s",
+ fulluser, port->hba->ldapserver, ldap_err2string(r))));
pfree(fulluser);
return STATUS_ERROR;
}
while (!feof(file) && !ferror(file))
{
- char rawline[MAX_LINE];
- char *lineptr;
+ char rawline[MAX_LINE];
+ char *lineptr;
if (!fgets(rawline, sizeof(rawline), file))
break;
- if (strlen(rawline) == MAX_LINE-1)
+ if (strlen(rawline) == MAX_LINE - 1)
/* Line too long! */
ereport(ERROR,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
line_number, filename)));
/* Strip trailing linebreak from rawline */
- while (rawline[strlen(rawline)-1] == '\n' ||
- rawline[strlen(rawline)-1] == '\r')
- rawline[strlen(rawline)-1] = '\0';
+ while (rawline[strlen(rawline) - 1] == '\n' ||
+ rawline[strlen(rawline) - 1] == '\r')
+ rawline[strlen(rawline) - 1] = '\0';
lineptr = rawline;
while (strlen(lineptr) > 0)
{
#ifdef LDAP_API_FEATURE_X_OPENLDAP
LDAPURLDesc *urldata;
- int rc;
+ int rc;
#endif
REQUIRE_AUTH_OPTION(uaLDAP, "ldapurl", "ldap");
if (rc != LDAP_SUCCESS)
{
ereport(LOG,
- (errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("could not parse LDAP URL \"%s\": %s", val, ldap_err2string(rc))));
+ (errcode(ERRCODE_CONFIG_FILE_ERROR),
+ errmsg("could not parse LDAP URL \"%s\": %s", val, ldap_err2string(rc))));
return false;
}
{
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("unsupported LDAP URL scheme: %s", urldata->lud_scheme)));
+ errmsg("unsupported LDAP URL scheme: %s", urldata->lud_scheme)));
ldap_free_urldesc(urldata);
return false;
}
hbaline->ldapbasedn = pstrdup(urldata->lud_dn);
if (urldata->lud_attrs)
- hbaline->ldapsearchattribute = pstrdup(urldata->lud_attrs[0]); /* only use first one */
+ hbaline->ldapsearchattribute = pstrdup(urldata->lud_attrs[0]); /* only use first one */
hbaline->ldapscope = urldata->lud_scope;
if (urldata->lud_filter)
{
return false;
}
ldap_free_urldesc(urldata);
-#else /* not OpenLDAP */
+#else /* not OpenLDAP */
ereport(LOG,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("LDAP URLs not supported on this platform")));
-#endif /* not OpenLDAP */
+#endif /* not OpenLDAP */
}
else if (strcmp(name, "ldaptls") == 0)
{
ereport(LOG,
(errcode(ERRCODE_INVALID_REGULAR_EXPRESSION),
errmsg("regular expression \"%s\" has no subexpressions as requested by backreference in \"%s\"",
- identLine->ident_user + 1, identLine->pg_role)));
+ identLine->ident_user + 1, identLine->pg_role)));
*error_p = true;
return;
}
MemoryContext linecxt;
MemoryContext oldcxt;
MemoryContext ident_context;
- IdentLine *newline;
+ IdentLine *newline;
file = AllocateFile(IdentFileName, "r");
if (file == NULL)
/* Now parse all the lines */
ident_context = AllocSetContextCreate(TopMemoryContext,
- "ident parser context",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ "ident parser context",
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
oldcxt = MemoryContextSwitchTo(ident_context);
forboth(line_cell, ident_lines, num_cell, ident_line_nums)
{
{
if (!pg_set_noblock(MyProcPort->sock))
ereport(COMMERROR,
- (errmsg("could not set socket to nonblocking mode: %m")));
+ (errmsg("could not set socket to nonblocking mode: %m")));
}
else
{
#ifdef EXEC_BACKEND
if (argc > 1 && strncmp(argv[1], "--fork", 6) == 0)
- SubPostmasterMain(argc, argv); /* does not return */
+ SubPostmasterMain(argc, argv); /* does not return */
#endif
#ifdef WIN32
else if (argc > 1 && strcmp(argv[1], "--single") == 0)
PostgresMain(argc, argv,
NULL, /* no dbname */
- get_current_username(progname)); /* does not return */
+ get_current_username(progname)); /* does not return */
else
- PostmasterMain(argc, argv); /* does not return */
- abort(); /* should not get here */
+ PostmasterMain(argc, argv); /* does not return */
+ abort(); /* should not get here */
}
cx(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring,
int num_gene, City *city_table)
{
-
int i,
start_pos,
curr_pos;
px(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring, int num_gene,
City *city_table)
{
-
int num_positions;
int i,
pos,
*/
if (childrel->cheapest_total_path->param_info == NULL)
subpaths = accumulate_append_subpath(subpaths,
- childrel->cheapest_total_path);
+ childrel->cheapest_total_path);
else
subpaths_valid = false;
#include "postgres.h"
#ifdef _MSC_VER
-#include <float.h> /* for _isnan */
+#include <float.h> /* for _isnan */
#endif
#include <math.h>
* The subquery could be an expansion of a view that's had columns
* added to it since the current query was parsed, so that there are
* non-junk tlist columns in it that don't correspond to any column
- * visible at our query level. Ignore such columns.
+ * visible at our query level. Ignore such columns.
*/
if (te->resno < rel->min_attr || te->resno > rel->max_attr)
continue;
/*
* We add ec2's items to ec1, then set ec2's ec_merged link to point
- * to ec1 and remove ec2 from the eq_classes list. We cannot simply
+ * to ec1 and remove ec2 from the eq_classes list. We cannot simply
* delete ec2 because that could leave dangling pointers in existing
* PathKeys. We leave it behind with a link so that the merged EC can
* be found.
continue;
/*
- * Scan members, looking for a match to the target column. Note
- * that child EC members are considered, but only when they belong to
- * the target relation. (Unlike regular members, the same expression
+ * Scan members, looking for a match to the target column. Note that
+ * child EC members are considered, but only when they belong to the
+ * target relation. (Unlike regular members, the same expression
* could be a child member of more than one EC. Therefore, it's
* potentially order-dependent which EC a child relation's target
* column gets matched to. This is annoying but it only happens in
* If there are any rels that have LATERAL references to this one, we
* cannot use join quals referencing them as index quals for this one,
* since such rels would have to be on the inside not the outside of a
- * nestloop join relative to this one. Create a Relids set listing all
+ * nestloop join relative to this one. Create a Relids set listing all
* such rels, for use in checks of potential join clauses.
*/
lateral_referencers = NULL;
*
* For simplicity in selecting relevant clauses, we represent each set of
* outer rels as a maximum set of clause_relids --- that is, the indexed
- * relation itself is also included in the relids set. considered_relids
+ * relation itself is also included in the relids set. considered_relids
* lists all relids sets we've already tried.
*/
for (indexcol = 0; indexcol < index->ncolumns; indexcol++)
*/
foreach(lc2, *considered_relids)
{
- Relids oldrelids = (Relids) lfirst(lc2);
+ Relids oldrelids = (Relids) lfirst(lc2);
/*
* If either is a subset of the other, no new set is possible.
/*
* If this clause was derived from an equivalence class, the
* clause list may contain other clauses derived from the same
- * eclass. We should not consider that combining this clause with
+ * eclass. We should not consider that combining this clause with
* one of those clauses generates a usefully different
* parameterization; so skip if any clause derived from the same
* eclass would already have been included when using oldrelids.
}
/*
- * Add applicable eclass join clauses. The clauses generated for each
+ * Add applicable eclass join clauses. The clauses generated for each
* column are redundant (cf generate_implied_equalities_for_column),
- * so we need at most one. This is the only exception to the general
+ * so we need at most one. This is the only exception to the general
* rule of using all available index clauses.
*/
foreach(lc, eclauseset->indexclauses[indexcol])
return;
/*
- * Construct a list of clauses that we can assume true for the purpose
- * of proving the index(es) usable. Restriction clauses for the rel are
+ * Construct a list of clauses that we can assume true for the purpose of
+ * proving the index(es) usable. Restriction clauses for the rel are
* always usable, and so are any join clauses that are "movable to" this
* rel. Also, we can consider any EC-derivable join clauses (which must
* be "movable to" this rel, by definition).
/*
* Add on any equivalence-derivable join clauses. Computing the correct
* relid sets for generate_join_implied_equalities is slightly tricky
- * because the rel could be a child rel rather than a true baserel, and
- * in that case we must remove its parent's relid from all_baserels.
+ * because the rel could be a child rel rather than a true baserel, and in
+ * that case we must remove its parent's relid from all_baserels.
*/
if (rel->reloptkind == RELOPT_OTHER_MEMBER_REL)
{
clauselist =
list_concat(clauselist,
generate_join_implied_equalities(root,
- bms_union(rel->relids,
- otherrels),
+ bms_union(rel->relids,
+ otherrels),
otherrels,
rel));
* However, when a LATERAL subquery is involved, we have to be a bit
* laxer, because there will simply not be any paths for the joinrel that
* aren't parameterized by whatever the subquery is parameterized by,
- * unless its parameterization is resolved within the joinrel. Hence, add
+ * unless its parameterization is resolved within the joinrel. Hence, add
* to param_source_rels anything that is laterally referenced in either
* input and is not in the join already.
*/
* sort.
*
* This function intentionally does not consider parameterized input
- * paths, except when the cheapest-total is parameterized. If we did so,
+ * paths, except when the cheapest-total is parameterized. If we did so,
* we'd have a combinatorial explosion of mergejoin paths of dubious
* value. This interacts with decisions elsewhere that also discriminate
* against mergejoins with parameterized inputs; see comments in
* Likewise remove references from LateralJoinInfo data structures.
*
* If we are deleting a LATERAL subquery, we can forget its
- * LateralJoinInfo altogether. Otherwise, make sure the target is not
+ * LateralJoinInfo altogether. Otherwise, make sure the target is not
* included in any lateral_lhs set. (It probably can't be, since that
* should have precluded deciding to remove it; but let's cope anyway.)
*/
newvars = NIL;
foreach(lc, vars)
{
- Node *node = (Node *) lfirst(lc);
+ Node *node = (Node *) lfirst(lc);
node = copyObject(node);
if (IsA(node, Var))
{
- Var *var = (Var *) node;
+ Var *var = (Var *) node;
/* Adjustment is easy since it's just one node */
var->varlevelsup = 0;
else if (IsA(node, PlaceHolderVar))
{
PlaceHolderVar *phv = (PlaceHolderVar *) node;
- int levelsup = phv->phlevelsup;
+ int levelsup = phv->phlevelsup;
/* Have to work harder to adjust the contained expression too */
if (levelsup != 0)
{
RelOptInfo *brel = root->simple_rel_array[rti];
Relids lateral_relids;
- ListCell *lc;
+ ListCell *lc;
/* there may be empty slots corresponding to non-baserel RTEs */
if (brel == NULL)
/* consider each laterally-referenced Var or PHV */
foreach(lc, brel->lateral_vars)
{
- Node *node = (Node *) lfirst(lc);
+ Node *node = (Node *) lfirst(lc);
if (IsA(node, Var))
{
- Var *var = (Var *) node;
+ Var *var = (Var *) node;
add_lateral_info(root, rti, bms_make_singleton(var->varno));
lateral_relids = bms_add_member(lateral_relids,
* If it's an appendrel parent, copy its lateral_relids to each child
* rel. We intentionally give each child rel the same minimum
* parameterization, even though it's quite possible that some don't
- * reference all the lateral rels. This is because any append path
+ * reference all the lateral rels. This is because any append path
* for the parent will have to have the same parameterization for
* every child anyway, and there's no value in forcing extra
* reparameterize_path() calls.
* add_lateral_info
* Add a LateralJoinInfo to root->lateral_info_list, if needed
*
- * We suppress redundant list entries. The passed lhs set must be freshly
+ * We suppress redundant list entries. The passed lhs set must be freshly
* made; we free it if not used in a new list entry.
*/
static void
Assert(jointype != JOIN_RIGHT);
/*
- * Presently the executor cannot support FOR [KEY] UPDATE/SHARE marking of rels
- * appearing on the nullable side of an outer join. (It's somewhat unclear
- * what that would mean, anyway: what should we mark when a result row is
- * generated from no element of the nullable relation?) So, complain if
- * any nullable rel is FOR [KEY] UPDATE/SHARE.
+ * Presently the executor cannot support FOR [KEY] UPDATE/SHARE marking of
+ * rels appearing on the nullable side of an outer join. (It's somewhat
+ * unclear what that would mean, anyway: what should we mark when a result
+ * row is generated from no element of the nullable relation?) So,
+ * complain if any nullable rel is FOR [KEY] UPDATE/SHARE.
*
* You might be wondering why this test isn't made far upstream in the
* parser. It's because the parser hasn't got enough info --- consider
* that provides all its variables.
*
* "nullable_relids" is the set of relids used in the expressions that are
- * potentially nullable below the expressions. (This has to be supplied by
+ * potentially nullable below the expressions. (This has to be supplied by
* caller because this function is used after deconstruct_jointree, so we
* don't have knowledge of where the clause items came from.)
*
* We have to replace Aggrefs with Params in equivalence classes too, else
* ORDER BY or DISTINCT on an optimized aggregate will fail. We don't
* need to process child eclass members though, since they aren't of
- * interest anymore --- and replace_aggs_with_params_mutator isn't able
- * to handle Aggrefs containing translated child Vars, anyway.
+ * interest anymore --- and replace_aggs_with_params_mutator isn't able to
+ * handle Aggrefs containing translated child Vars, anyway.
*
* Note: at some point it might become necessary to mutate other data
* structures too, such as the query's sortClause or distinctClause. Right
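The equivalence-class pass can then be a single helper call; passing false for the last argument matches the "skip child members" point above, though the exact signature of mutate_eclass_expressions should be treated as an assumption:

	mutate_eclass_expressions(root,
							  replace_aggs_with_params_mutator,
							  (void *) root,
							  false);	/* don't process child eclass members */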
#define EXPRKIND_QUAL 0
#define EXPRKIND_TARGET 1
#define EXPRKIND_RTFUNC 2
-#define EXPRKIND_RTFUNC_LATERAL 3
+#define EXPRKIND_RTFUNC_LATERAL 3
#define EXPRKIND_VALUES 4
-#define EXPRKIND_VALUES_LATERAL 5
+#define EXPRKIND_VALUES_LATERAL 5
#define EXPRKIND_LIMIT 6
#define EXPRKIND_APPINFO 7
#define EXPRKIND_PHV 8
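These EXPRKIND codes are handed to preprocess_expression so it knows which simplifications are safe for a given expression; a typical call from subquery_planner looks like:

	parse->targetList = (List *)
		preprocess_expression(root, (Node *) parse->targetList,
							  EXPRKIND_TARGET);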
returningLists = NIL;
/*
- * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node will
- * have dealt with fetching non-locked marked rows, else we need
- * to have ModifyTable do that.
+ * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node
+ * will have dealt with fetching non-locked marked rows, else we
+ * need to have ModifyTable do that.
*/
if (parse->rowMarks)
rowMarks = NIL;
root->simple_rel_array = save_rel_array;
/*
- * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node will have
- * dealt with fetching non-locked marked rows, else we need to have
+ * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node will
+ * have dealt with fetching non-locked marked rows, else we need to have
* ModifyTable do that.
*/
if (parse->rowMarks)
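		/* hedged completion of this branch, mirroring the hunk above */
		rowMarks = NIL;			/* LockRows has already dealt with them */
	else
		rowMarks = root->rowMarks;	/* tell ModifyTable to do the fetching */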
*/
current_pathkeys = make_pathkeys_for_sortclauses(root,
set_sortclauses,
- result_plan->targetlist);
+ result_plan->targetlist);
/*
* We should not need to call preprocess_targetlist, since we must be
tlist);
/*
- * Can't handle FOR [KEY] UPDATE/SHARE here (parser should have checked
- * already, but let's make sure).
+ * Can't handle FOR [KEY] UPDATE/SHARE here (parser should have
+ * checked already, but let's make sure).
*/
if (parse->rowMarks)
ereport(ERROR,
* it's not worth trying to avoid it. In particular, think not to
* skip adding the Result if the initial window_tlist matches the
* top-level plan node's output, because we might change the tlist
- * inside the following loop.) Note that on second and subsequent
+ * inside the following loop.) Note that on second and subsequent
* passes through the following loop, the top-level node will be a
* WindowAgg which we know can project; so we only need to check
* once.
/*
* The "base" targetlist for all steps of the windowing process is
- * a flat tlist of all Vars and Aggs needed in the result. (In
+ * a flat tlist of all Vars and Aggs needed in the result. (In
* some cases we wouldn't need to propagate all of these all the
* way to the top, since they might only be needed as inputs to
* WindowFuncs. It's probably not worth trying to optimize that
* though.) We also add window partitioning and sorting
* expressions to the base tlist, to ensure they're computed only
* once at the bottom of the stack (that's critical for volatile
- * functions). As we climb up the stack, we'll add outputs for
+ * functions). As we climb up the stack, we'll add outputs for
* the WindowFuncs computed at each level.
*/
window_tlist = make_windowInputTargetList(root,
/*
* The copyObject steps here are needed to ensure that each plan
- * node has a separately modifiable tlist. (XXX wouldn't a
+ * node has a separately modifiable tlist. (XXX wouldn't a
* shallow list copy do for that?)
*/
result_plan->targetlist = (List *) copyObject(window_tlist);
* plan's tlist for any partitioning or ordering columns that
* aren't plain Vars. (In theory, make_windowInputTargetList
* should have provided all such columns, but let's not assume
- * that here.) Furthermore, this way we can use existing
+ * that here.) Furthermore, this way we can use existing
* infrastructure to identify which input columns are the
* interesting ones.
*/
}
/*
- * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node. (Note: we
- * intentionally test parse->rowMarks not root->rowMarks here. If there
- * are only non-locking rowmarks, they should be handled by the
+ * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node.
+ * (Note: we intentionally test parse->rowMarks not root->rowMarks here.
+ * If there are only non-locking rowmarks, they should be handled by the
* ModifyTable node instead.)
*/
if (parse->rowMarks)
if (parse->rowMarks)
{
/*
- * We've got trouble if FOR [KEY] UPDATE/SHARE appears inside grouping,
- * since grouping renders a reference to individual tuple CTIDs
- * invalid. This is also checked at parse time, but that's
+ * We've got trouble if FOR [KEY] UPDATE/SHARE appears inside
+ * grouping, since grouping renders a reference to individual tuple
+ * CTIDs invalid. This is also checked at parse time, but that's
* insufficient because of rule substitution, query pullup, etc.
*/
CheckSelectLocking(parse);
else
{
/*
- * We only need rowmarks for UPDATE, DELETE, or FOR [KEY] UPDATE/SHARE.
+ * We only need rowmarks for UPDATE, DELETE, or FOR [KEY]
+ * UPDATE/SHARE.
*/
if (parse->commandType != CMD_UPDATE &&
parse->commandType != CMD_DELETE)
*
* If we have constant-zero OFFSET and constant-null LIMIT, we can skip adding
* a Limit node. This is worth checking for because "OFFSET 0" is a common
- * locution for an optimization fence. (Because other places in the planner
+ * locution for an optimization fence. (Because other places in the planner
* merely check whether parse->limitOffset isn't NULL, it will still work as
* an optimization fence --- we're just suppressing unnecessary run-time
* overhead.)
/* Treat NULL as no offset; the executor would too */
if (!((Const *) node)->constisnull)
{
- int64 offset = DatumGetInt64(((Const *) node)->constvalue);
+ int64 offset = DatumGetInt64(((Const *) node)->constvalue);
/* Executor would treat less-than-zero same as zero */
if (offset > 0)
*
* When grouping_planner inserts one or more WindowAgg nodes into the plan,
* this function computes the initial target list to be computed by the node
- * just below the first WindowAgg. This list must contain all values needed
+ * just below the first WindowAgg. This list must contain all values needed
* to evaluate the window functions, compute the final target list, and
* perform any required final sort step. If multiple WindowAggs are needed,
* each intermediate one adds its window function results onto this tlist;
*
* This function is much like make_subplanTargetList, though not quite enough
* like it to share code. As in that function, we flatten most expressions
- * into their component variables. But we do not want to flatten window
+ * into their component variables. But we do not want to flatten window
* PARTITION BY/ORDER BY clauses, since that might result in multiple
* evaluations of them, which would be bad (possibly even resulting in
* inconsistent answers, if they contain volatile functions). Also, we must
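For reference, the function being described has roughly this shape, matching the truncated call site earlier in this diff (the parameter list is a sketch, not a verbatim copy):

	static List *
	make_windowInputTargetList(PlannerInfo *root,
							   List *tlist,
							   List *activeWindows);

	...
	window_tlist = make_windowInputTargetList(root, tlist, activeWindows);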
rte = makeNode(RangeTblEntry);
rte->rtekind = RTE_RELATION;
rte->relid = tableOid;
- rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
+ rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
rte->lateral = false;
rte->inh = false;
rte->inFromCl = true;
*
* If this jointree node is within either side of an outer join, then
* lowest_outer_join references the lowest such JoinExpr node; otherwise
- * it is NULL. We use this to constrain the effects of LATERAL subqueries.
+ * it is NULL. We use this to constrain the effects of LATERAL subqueries.
*
* If this jointree node is within the nullable side of an outer join, then
* lowest_nulling_outer_join references the lowest such JoinExpr node;
case JOIN_INNER:
j->larg = pull_up_subqueries_recurse(root, j->larg,
lowest_outer_join,
- lowest_nulling_outer_join,
+ lowest_nulling_outer_join,
NULL);
j->rarg = pull_up_subqueries_recurse(root, j->rarg,
lowest_outer_join,
- lowest_nulling_outer_join,
+ lowest_nulling_outer_join,
NULL);
break;
case JOIN_LEFT:
case JOIN_ANTI:
j->larg = pull_up_subqueries_recurse(root, j->larg,
j,
- lowest_nulling_outer_join,
+ lowest_nulling_outer_join,
NULL);
j->rarg = pull_up_subqueries_recurse(root, j->rarg,
j,
NULL);
j->rarg = pull_up_subqueries_recurse(root, j->rarg,
j,
- lowest_nulling_outer_join,
+ lowest_nulling_outer_join,
NULL);
break;
default:
/*
* Make a modifiable copy of the subquery's rtable, so we can adjust
- * upper-level Vars in it. There are no such Vars in the setOperations
+ * upper-level Vars in it. There are no such Vars in the setOperations
* tree proper, so fixing the rtable should be sufficient.
*/
rtable = copyObject(subquery->rtable);
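The adjustment that follows is presumably a single rewriteManip call; a hedged sketch:

	/* pull every upper-level Var in the copied rtable down one level */
	IncrementVarSublevelsUp_rtable(rtable, -1, 1);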
return false;
/*
- * Don't pull up if the RTE represents a security-barrier view; we couldn't
- * prevent information leakage once the RTE's Vars are scattered about in
- * the upper query.
+ * Don't pull up if the RTE represents a security-barrier view; we
+ * couldn't prevent information leakage once the RTE's Vars are scattered
+ * about in the upper query.
*/
if (rte->security_barrier)
return false;
*/
if (rte->lateral && lowest_outer_join != NULL)
{
- Relids lvarnos = pull_varnos_of_level((Node *) subquery, 1);
- Relids jvarnos = get_relids_in_jointree((Node *) lowest_outer_join,
- true);
+ Relids lvarnos = pull_varnos_of_level((Node *) subquery, 1);
+ Relids jvarnos = get_relids_in_jointree((Node *) lowest_outer_join,
+ true);
if (!bms_is_subset(lvarnos, jvarnos))
return false;
/*
* If the RangeTblRef refers to a LATERAL subquery (that isn't the
* same subquery we're pulling up), it might contain references to the
- * target subquery, which we must replace. We drive this from the
+ * target subquery, which we must replace. We drive this from the
* jointree scan, rather than a scan of the rtable, for a couple of
* reasons: we can avoid processing no-longer-referenced RTEs, and we
* can use the appropriate setting of need_phvs depending on whether
newexpr->funcresulttype = result_type;
newexpr->funcretset = false;
newexpr->funcvariadic = funcvariadic;
- newexpr->funcformat = COERCE_EXPLICIT_CALL; /* doesn't matter */
+ newexpr->funcformat = COERCE_EXPLICIT_CALL; /* doesn't matter */
newexpr->funccollid = result_collid; /* doesn't matter */
newexpr->inputcollid = input_collid;
newexpr->args = args;
*
* cheapest_total_path is normally the cheapest-total-cost unparameterized
* path; but if there are no unparameterized paths, we assign it to be the
- * best (cheapest least-parameterized) parameterized path. However, only
+ * best (cheapest least-parameterized) parameterized path. However, only
* unparameterized paths are considered candidates for cheapest_startup_path,
* so that will be NULL if there are no unparameterized paths.
*
* The cheapest_parameterized_paths list collects all parameterized paths
- * that have survived the add_path() tournament for this relation. (Since
+ * that have survived the add_path() tournament for this relation. (Since
* add_path ignores pathkeys and startup cost for a parameterized path,
* these will be paths that have best total cost or best row count for their
* parameterization.) cheapest_parameterized_paths always includes the
/* old path is less-parameterized, keep it */
break;
case BMS_DIFFERENT:
+
/*
* This means that neither path has the least possible
* parameterization for the rel. We'll sit on the old
parameterized_paths = lcons(cheapest_total_path, parameterized_paths);
/*
- * If there is no unparameterized path, use the best parameterized path
- * as cheapest_total_path (but not as cheapest_startup_path).
+ * If there is no unparameterized path, use the best parameterized path as
+ * cheapest_total_path (but not as cheapest_startup_path).
*/
if (cheapest_total_path == NULL)
cheapest_total_path = best_param_path;
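The "tournament" that populates cheapest_total_path is, in essence, a scan like this sketch; compare_path_costs and TOTAL_COST are real helpers, while the surrounding declarations are assumed:

	Path	   *best_total = NULL;
	ListCell   *p;

	foreach(p, rel->pathlist)
	{
		Path	   *path = (Path *) lfirst(p);

		/* parameterized paths don't compete for cheapest_total_path */
		if (path->param_info != NULL)
			continue;
		if (best_total == NULL ||
			compare_path_costs(path, best_total, TOTAL_COST) < 0)
			best_total = path;
	}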
accept_new = false; /* old dominates new */
else if (compare_path_costs_fuzzily(new_path,
old_path,
- 1.0000000001,
+ 1.0000000001,
parent_rel->consider_startup) == COSTS_BETTER1)
remove_old = true; /* new dominates old */
else
pathnode->path.pathtype = T_Result;
pathnode->path.parent = NULL;
- pathnode->path.param_info = NULL; /* there are no other rels... */
+ pathnode->path.param_info = NULL; /* there are no other rels... */
pathnode->path.pathkeys = NIL;
pathnode->quals = quals;
else
{
/*
- * Process INSERT ... VALUES with a single VALUES sublist. We treat
+ * Process INSERT ... VALUES with a single VALUES sublist. We treat
* this case separately for efficiency. The sublist is just computed
* directly as the Query's targetlist, with no VALUES RTE. So it
* works just like a SELECT without any FROM.
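A hedged sketch of that special case; transformExpressionList and EXPR_KIND_VALUES are real parser facilities, while exprList and selectStmt are assumed locals of the transform function:

	List	   *exprList;

	if (list_length(selectStmt->valuesLists) == 1)
	{
		/* single VALUES row: compute it directly as the targetlist */
		exprList = transformExpressionList(pstate,
								  (List *) linitial(selectStmt->valuesLists),
										   EXPR_KIND_VALUES);
	}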
/*
* Ordinarily there can't be any current-level Vars in the expression
* lists, because the namespace was empty ... but if we're inside CREATE
- * RULE, then NEW/OLD references might appear. In that case we have to
+ * RULE, then NEW/OLD references might appear. In that case we have to
* mark the VALUES RTE as LATERAL.
*/
if (pstate->p_rtable != NIL &&
/*
* A materialized view would either need to save parameters for use in
- * maintaining/loading the data or prohibit them entirely. The latter
+ * maintaining/loading the data or prohibit them entirely. The latter
* seems safer and more sane.
*/
if (query_contains_extern_params(query))
errmsg("materialized views may not be defined using bound parameters")));
/*
- * For now, we disallow unlogged materialized views, because it
- * seems like a bad idea for them to just go to empty after a crash.
- * (If we could mark them as unpopulated, that would be better, but
- * that requires catalog changes which crash recovery can't presently
+ * For now, we disallow unlogged materialized views, because it seems
+ * like a bad idea for them to just go to empty after a crash. (If we
+ * could mark them as unpopulated, that would be better, but that
+ * requires catalog changes which crash recovery can't presently
* handle.)
*/
if (stmt->into->rel->relpersistence == RELPERSISTENCE_UNLOGGED)
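		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
		/* hedged completion; the message text is assumed from this era */
				 errmsg("materialized views cannot be UNLOGGED")));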
if (qry->distinctClause != NIL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("row-level locks are not allowed with DISTINCT clause")));
+ errmsg("row-level locks are not allowed with DISTINCT clause")));
if (qry->groupClause != NIL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("row-level locks are not allowed with GROUP BY clause")));
+ errmsg("row-level locks are not allowed with GROUP BY clause")));
if (qry->havingQual != NULL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("row-level locks are not allowed with HAVING clause")));
+ errmsg("row-level locks are not allowed with HAVING clause")));
if (qry->hasAggs)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("row-level locks are not allowed with aggregate functions")));
+ errmsg("row-level locks are not allowed with aggregate functions")));
if (qry->hasWindowFuncs)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("row-level locks are not allowed with window functions")));
+ errmsg("row-level locks are not allowed with window functions")));
if (expression_returns_set((Node *) qry->targetList))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
{
/*
* If the same RTE is specified for more than one locking strength,
- * treat is as the strongest. (Reasonable, since you can't take both a
- * shared and exclusive lock at the same time; it'll end up being
+ * treat it as the strongest. (Reasonable, since you can't take both
+ * a shared and exclusive lock at the same time; it'll end up being
* exclusive anyway.)
*
* We also consider that NOWAIT wins if it's specified both ways. This
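The merge rule both sentences describe can be sketched as follows; get_parse_rowmark is a real helper, and the strength/noWait fields follow the RowMarkClause layout of this era (treat the details as assumptions):

	RowMarkClause *rc;

	if ((rc = get_parse_rowmark(qry, rtindex)) != NULL)
	{
		/* strongest lock wins; NOWAIT wins if specified either way */
		rc->strength = Max(rc->strength, strength);
		rc->noWait = rc->noWait || noWait;
		return;
	}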
use warnings;
use strict;
-my $gram_filename = $ARGV[0];
+my $gram_filename = $ARGV[0];
my $kwlist_filename = $ARGV[1];
my $errors = 0;
if (!($kcat))
{
+
# Is this the beginning of a keyword list?
foreach $k (keys %keyword_categories)
{
}
elsif ($arr[$fieldIndexer] eq '/*')
{
+
# start of a multiline comment
$comment = 1;
next;
if ($arr[$fieldIndexer] eq ';')
{
+
# end of keyword list
$kcat = '';
next;
foreach $kword (@{ $keywords{$kcat} })
{
+
# Some keywords have a _P suffix. Remove it for the comparison.
$bare_kword = $kword;
$bare_kword =~ s/_P$//;
}
else
{
+
# Remove it from the hash, so that we can
# complain at the end if there are keywords left
# that were not found in kwlist.h
if (errkind)
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- /* translator: %s is name of a SQL construct, eg GROUP BY */
+ /* translator: %s is name of a SQL construct, eg GROUP BY */
errmsg("aggregate functions are not allowed in %s",
ParseExprKindName(pstate->p_expr_kind)),
parser_errposition(pstate, agg->location)));
if (errkind)
ereport(ERROR,
(errcode(ERRCODE_WINDOWING_ERROR),
- /* translator: %s is name of a SQL construct, eg GROUP BY */
+ /* translator: %s is name of a SQL construct, eg GROUP BY */
errmsg("window functions are not allowed in %s",
ParseExprKindName(pstate->p_expr_kind)),
parser_errposition(pstate, wfunc->location)));
* *top_rti: receives the rangetable index of top_rte. (Ditto.)
*
* *namespace: receives a List of ParseNamespaceItems for the RTEs exposed
- * as table/column names by this item. (The lateral_only flags in these items
+ * as table/column names by this item. (The lateral_only flags in these items
* are indeterminate and should be explicitly set by the caller before use.)
*/
static Node *
/*
* Make the left-side RTEs available for LATERAL access within the
* right side, by temporarily adding them to the pstate's namespace
- * list. Per SQL:2008, if the join type is not INNER or LEFT then
- * the left-side names must still be exposed, but it's an error to
+ * list. Per SQL:2008, if the join type is not INNER or LEFT then the
+ * left-side names must still be exposed, but it's an error to
* reference them. (Stupid design, but that's what it says.) Hence,
* we always push them into the namespace, but mark them as not
* lateral_ok if the jointype is wrong.
*
* Note: if there are nested alias-less JOINs, the lower-level ones
* will remain in the list although they have neither p_rel_visible
- * nor p_cols_visible set. We could delete such list items, but it's
+ * nor p_cols_visible set. We could delete such list items, but it's
* unclear that it's worth expending cycles to do so.
*/
if (j->alias != NULL)
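The push-and-restore dance the first comment describes might be sketched like this, assuming a helper setNamespaceLateralState as in parse_clause.c of this era; l_namespace and sv_namespace_length are assumed locals:

	lateral_ok = (j->jointype == JOIN_INNER || j->jointype == JOIN_LEFT);
	setNamespaceLateralState(l_namespace, true, lateral_ok);

	sv_namespace_length = list_length(pstate->p_namespace);
	pstate->p_namespace = list_concat(pstate->p_namespace, l_namespace);

	/* ... transform the right side (j->rarg) here ... */

	/* pop the temporarily-added namespace items again */
	pstate->p_namespace = list_truncate(pstate->p_namespace,
										sv_namespace_length);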
contain_aggs_of_level((Node *) tle->expr, 0))
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- /* translator: %s is name of a SQL construct, eg GROUP BY */
+ /* translator: %s is name of a SQL construct, eg GROUP BY */
errmsg("aggregate functions are not allowed in %s",
ParseExprKindName(exprKind)),
parser_errposition(pstate,
- locate_agg_of_level((Node *) tle->expr, 0))));
+ locate_agg_of_level((Node *) tle->expr, 0))));
if (pstate->p_hasWindowFuncs &&
contain_windowfuncs((Node *) tle->expr))
ereport(ERROR,
(errcode(ERRCODE_WINDOWING_ERROR),
- /* translator: %s is name of a SQL construct, eg GROUP BY */
+ /* translator: %s is name of a SQL construct, eg GROUP BY */
errmsg("window functions are not allowed in %s",
ParseExprKindName(exprKind)),
parser_errposition(pstate,
- locate_windowfunc((Node *) tle->expr))));
+ locate_windowfunc((Node *) tle->expr))));
break;
case EXPR_KIND_ORDER_BY:
/* no extra checks needed */
*
* node the ORDER BY, GROUP BY, or DISTINCT ON expression to be matched
* tlist the target list (passed by reference so we can append to it)
- * exprKind identifies clause type being processed
+ * exprKind identifies clause type being processed
*/
static TargetEntry *
findTargetlistEntrySQL92(ParseState *pstate, Node *node, List **tlist,
*
* node the ORDER BY, GROUP BY, etc expression to be matched
* tlist the target list (passed by reference so we can append to it)
- * exprKind identifies clause type being processed
+ * exprKind identifies clause type being processed
*/
static TargetEntry *
findTargetlistEntrySQL99(ParseState *pstate, Node *node, List **tlist,
break;
default:
elog(ERROR, "unrecognized A_Expr kind: %d", a->kind);
- result = NULL; /* keep compiler quiet */
+ result = NULL; /* keep compiler quiet */
break;
}
break;
return result;
/*
- * Check to see if the sublink is in an invalid place within the query.
- * We allow sublinks everywhere in SELECT/INSERT/UPDATE/DELETE, but
- * generally not in utility statements.
+ * Check to see if the sublink is in an invalid place within the query. We
+ * allow sublinks everywhere in SELECT/INSERT/UPDATE/DELETE, but generally
+ * not in utility statements.
*/
err = NULL;
switch (pstate->p_expr_kind)
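	{
		/*
		 * Hedged sketch: most expression kinds permit sublinks and simply
		 * break out, while utility-ish contexts set an error string; only
		 * one representative case is shown here.
		 */
		case EXPR_KIND_CHECK_CONSTRAINT:
			err = _("cannot use subquery in check constraint");
			break;
		default:
			break;				/* sublinks are OK in this context */
	}
	if (err)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg_internal("%s", err),
				 parser_errposition(pstate, sublink->location)));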
xexpr = makeNode(XmlExpr);
xexpr->op = IS_XMLSERIALIZE;
xexpr->args = list_make1(coerce_to_specific_type(pstate,
- transformExprRecurse(pstate, xs->expr),
+ transformExprRecurse(pstate, xs->expr),
XMLOID,
"XMLSERIALIZE"));
*
* This is different from refnameRangeTblEntry in that it considers every
* entry in the ParseState's rangetable(s), not only those that are currently
- * visible in the p_namespace list(s). This behavior is invalid per the SQL
+ * visible in the p_namespace list(s). This behavior is invalid per the SQL
* spec, and it may give ambiguous results (there might be multiple equally
* valid matches, but only one will be returned). This must be used ONLY
* as a heuristic in giving suitable error messages. See errorMissingRTE.
*
* This is different from colNameToVar in that it considers every entry in
* the ParseState's rangetable(s), not only those that are currently visible
- * in the p_namespace list(s). This behavior is invalid per the SQL spec,
+ * in the p_namespace list(s). This behavior is invalid per the SQL spec,
* and it may give ambiguous results (there might be multiple equally valid
* matches, but only one will be returned). This must be used ONLY as a
* heuristic in giving suitable error messages. See errorMissingColumn.
*
* node the (untransformed) parse tree for the value expression.
* expr the transformed expression, or NULL if caller didn't do it yet.
- * exprKind expression kind (EXPR_KIND_SELECT_TARGET, etc)
+ * exprKind expression kind (EXPR_KIND_SELECT_TARGET, etc)
* colname the column name to be assigned, or NULL if none yet set.
* resjunk true if the target should be marked resjunk, ie, it is not
* wanted in the final projected tuple.
* Transforms '*' (in the target list) into a list of targetlist entries.
*
* tlist entries are generated for each relation visible for unqualified
- * column name access. We do not consider qualified-name-only entries because
+ * column name access. We do not consider qualified-name-only entries because
* that would include input tables of aliasless JOINs, NEW/OLD pseudo-entries,
* etc.
*
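In sketch form, the expansion loop reads as below; ParseNamespaceItem, expandRelAttrs, and RTERangeTablePosn are real parser facilities, while target, location, and the ListCell are assumed from the enclosing function:

	ListCell   *l;

	foreach(l, pstate->p_namespace)
	{
		ParseNamespaceItem *nsitem = (ParseNamespaceItem *) lfirst(l);
		RangeTblEntry *rte = nsitem->p_rte;

		/* ignore entries that expose only a table name, not columns */
		if (!nsitem->p_cols_visible)
			continue;
		target = list_concat(target,
							 expandRelAttrs(pstate, rte,
									 RTERangeTablePosn(pstate, rte, NULL),
											0, location));
	}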
if (cxt->isforeign)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("constraints are not supported on foreign tables"),
+ errmsg("constraints are not supported on foreign tables"),