# This is only needed on Windows machines that don't use UNIX sockets.
$node->init(
'allows_streaming' => 1,
- 'auth_extra' => [ '--create-role', 'backupuser' ]);
+ 'auth_extra' => [ '--create-role' => 'backupuser' ]);
$node->append_conf('postgresql.conf',
"shared_preload_libraries = 'basebackup_to_shell'");
# to keep test times reasonable. Using @pg_basebackup_defs as the first
# element of the array passed to IPC::Run interpolates the array (as it is
# not a reference to an array)...
-my @pg_basebackup_defs = ('pg_basebackup', '--no-sync', '-cfast');
+my @pg_basebackup_defs =
+ ('pg_basebackup', '--no-sync', '--checkpoint' => 'fast');
# This particular test module generally wants to run with -Xfetch, because
# -Xstream is not supported with a backup target, and with -U backupuser.
-my @pg_basebackup_cmd = (@pg_basebackup_defs, '-U', 'backupuser', '-Xfetch');
+my @pg_basebackup_cmd = (
+ @pg_basebackup_defs,
+ '--username' => 'backupuser',
+ '--wal-method' => 'fetch');
# Can't use this module without setting basebackup_to_shell.command.
$node->command_fails_like(
- [ @pg_basebackup_cmd, '--target', 'shell' ],
+ [ @pg_basebackup_cmd, '--target' => 'shell' ],
qr/shell command for backup is not configured/,
'fails if basebackup_to_shell.command is not set');
# Should work now.
$node->command_ok(
- [ @pg_basebackup_cmd, '--target', 'shell' ],
+ [ @pg_basebackup_cmd, '--target' => 'shell' ],
'backup with no detail: pg_basebackup');
verify_backup('', $backup_path, "backup with no detail");
# Should fail with a detail.
$node->command_fails_like(
- [ @pg_basebackup_cmd, '--target', 'shell:foo' ],
+ [ @pg_basebackup_cmd, '--target' => 'shell:foo' ],
qr/a target detail is not permitted because the configured command does not include %d/,
'fails if detail provided without %d');
# Should fail due to lack of permission.
$node->command_fails_like(
- [ @pg_basebackup_cmd, '--target', 'shell' ],
+ [ @pg_basebackup_cmd, '--target' => 'shell' ],
qr/permission denied to use basebackup_to_shell/,
'fails if required_role not granted');
# Should fail due to lack of a detail.
$node->safe_psql('postgres', 'GRANT trustworthy TO backupuser');
$node->command_fails_like(
- [ @pg_basebackup_cmd, '--target', 'shell' ],
+ [ @pg_basebackup_cmd, '--target' => 'shell' ],
qr/a target detail is required because the configured command includes %d/,
'fails if %d is present and detail not given');
# Should work.
-$node->command_ok([ @pg_basebackup_cmd, '--target', 'shell:bar' ],
+$node->command_ok([ @pg_basebackup_cmd, '--target' => 'shell:bar' ],
'backup with detail: pg_basebackup');
verify_backup('bar.', $backup_path, "backup with detail");
# Verify.
$node->command_ok(
[
- 'pg_verifybackup', '-n',
- '-m', "${backup_dir}/${prefix}backup_manifest",
- '-e', $extract_path
+ 'pg_verifybackup',
+ '--no-parse-wal',
+ '--manifest-path' => "${backup_dir}/${prefix}backup_manifest",
+ '--exit-on-error',
+ $extract_path
],
"$test_name: backup verifies ok");
}
program_version_ok('initdb');
program_options_handling_ok('initdb');
-command_fails([ 'initdb', '-S', "$tempdir/nonexistent" ],
+command_fails([ 'initdb', '--sync-only', "$tempdir/nonexistent" ],
'sync missing data directory');
mkdir $xlogdir;
mkdir "$xlogdir/lost+found";
-command_fails(
- [ 'initdb', '-X', $xlogdir, $datadir ],
+command_fails([ 'initdb', '--waldir' => $xlogdir, $datadir ],
'existing nonempty xlog directory');
rmdir "$xlogdir/lost+found";
command_fails(
- [ 'initdb', '-X', 'pgxlog', $datadir ],
+ [ 'initdb', '--waldir' => 'pgxlog', $datadir ],
'relative xlog directory not allowed');
-command_fails(
- [ 'initdb', '-U', 'pg_test', $datadir ],
+command_fails([ 'initdb', '--username' => 'pg_test', $datadir ],
'role names cannot begin with "pg_"');
mkdir $datadir;
local (%ENV) = %ENV;
delete $ENV{TZ};
- # while we are here, also exercise -T and -c options
+ # while we are here, also exercise --text-search-config and --set options
command_ok(
[
- 'initdb', '-N', '-T', 'german', '-c',
- 'default_text_search_config=german',
- '-X', $xlogdir, $datadir
+ 'initdb',
+ '--no-sync',
+ '--text-search-config' => 'german',
+ '--set' => 'default_text_search_config=german',
+ '--waldir' => $xlogdir,
+ $datadir
],
'successful creation');
qr/Data page checksum version:.*1/,
'checksums are enabled in control file');
-command_ok([ 'initdb', '-S', $datadir ], 'sync only');
+command_ok([ 'initdb', '--sync-only', $datadir ], 'sync only');
command_fails([ 'initdb', $datadir ], 'existing data directory');
if ($supports_syncfs)
{
- command_ok([ 'initdb', '-S', $datadir, '--sync-method', 'syncfs' ],
+ command_ok(
+ [ 'initdb', '--sync-only', $datadir, '--sync-method' => 'syncfs' ],
'sync method syncfs');
}
else
{
- command_fails([ 'initdb', '-S', $datadir, '--sync-method', 'syncfs' ],
+ command_fails(
+ [ 'initdb', '--sync-only', $datadir, '--sync-method' => 'syncfs' ],
'sync method syncfs');
}
command_like(
[
'initdb', '--no-sync',
- '-A', 'trust',
+ '-A' => 'trust',
'--locale-provider=icu', '--locale=und',
'--lc-collate=C', '--lc-ctype=C',
'--lc-messages=C', '--lc-numeric=C',
],
'fails for invalid option combination');
-command_fails([ 'initdb', '--no-sync', '--set', 'foo=bar', "$tempdir/dataX" ],
+command_fails(
+ [ 'initdb', '--no-sync', '--set' => 'foo=bar', "$tempdir/dataX" ],
'fails for invalid --set option');
# Make sure multiple invocations of -c parameters are added case-insensitively
# not part of the tests included in pg_checksums to save from
# the creation of an extra instance.
command_fails(
- [ 'pg_checksums', '-D', $datadir_nochecksums ],
+ [ 'pg_checksums', '--pgdata' => $datadir_nochecksums ],
"pg_checksums fails with data checksum disabled");
done_testing();
# Failing to resolve a database pattern is an error by default.
$node->command_checks_all(
- [ 'pg_amcheck', '-d', 'qqq', '-d', 'postgres' ],
+ [ 'pg_amcheck', '--database' => 'qqq', '--database' => 'postgres' ],
1,
[qr/^$/],
[qr/pg_amcheck: error: no connectable databases to check matching "qqq"/],
# But only a warning under --no-strict-names
$node->command_checks_all(
- [ 'pg_amcheck', '--no-strict-names', '-d', 'qqq', '-d', 'postgres' ],
+ [
+ 'pg_amcheck',
+ '--no-strict-names',
+ '--database' => 'qqq',
+ '--database' => 'postgres'
+ ],
0,
[qr/^$/],
[
# Check that a substring of an existent database name does not get interpreted
# as a matching pattern.
$node->command_checks_all(
- [ 'pg_amcheck', '-d', 'post', '-d', 'postgres' ],
+ [ 'pg_amcheck', '--database' => 'post', '--database' => 'postgres' ],
1,
[qr/^$/],
[
# Check that a superstring of an existent database name does not get interpreted
# as a matching pattern.
$node->command_checks_all(
- [ 'pg_amcheck', '-d', 'postgresql', '-d', 'postgres' ],
+ [
+ 'pg_amcheck',
+ '--database' => 'postgresql',
+ '--database' => 'postgres'
+ ],
1,
[qr/^$/],
[
# Test connecting with a non-existent user
# Failing to connect to the initial database due to bad username is an error.
-$node->command_checks_all([ 'pg_amcheck', '-U', 'no_such_user', 'postgres' ],
+$node->command_checks_all(
+ [ 'pg_amcheck', '--username' => 'no_such_user', 'postgres' ],
1, [qr/^$/], [], 'checking with a non-existent user');
#########################################
# Again, but this time with another database to check, so no error is raised.
$node->command_checks_all(
- [ 'pg_amcheck', '-d', 'template1', '-d', 'postgres' ],
+ [ 'pg_amcheck', '--database' => 'template1', '--database' => 'postgres' ],
0,
[qr/^$/],
[
# Check three-part unreasonable pattern that has zero-length names
$node->command_checks_all(
- [ 'pg_amcheck', '-d', 'postgres', '-t', '..' ],
+ [ 'pg_amcheck', '--database' => 'postgres', '--table' => '..' ],
1,
[qr/^$/],
[
# Again, but with non-trivial schema and relation parts
$node->command_checks_all(
- [ 'pg_amcheck', '-d', 'postgres', '-t', '.foo.bar' ],
+ [ 'pg_amcheck', '--database' => 'postgres', '--table' => '.foo.bar' ],
1,
[qr/^$/],
[
# Check two-part unreasonable pattern that has zero-length names
$node->command_checks_all(
- [ 'pg_amcheck', '-d', 'postgres', '-t', '.' ],
+ [ 'pg_amcheck', '--database' => 'postgres', '--table' => '.' ],
1,
[qr/^$/],
[qr/pg_amcheck: error: no heap tables to check matching "\."/],
# Check that a multipart database name is rejected
$node->command_checks_all(
- [ 'pg_amcheck', '-d', 'localhost.postgres' ],
+ [ 'pg_amcheck', '--database' => 'localhost.postgres' ],
2,
[qr/^$/],
[
# Check that a three-part schema name is rejected
$node->command_checks_all(
- [ 'pg_amcheck', '-s', 'localhost.postgres.pg_catalog' ],
+ [ 'pg_amcheck', '--schema' => 'localhost.postgres.pg_catalog' ],
2,
[qr/^$/],
[
# Check that a four-part table name is rejected
$node->command_checks_all(
- [ 'pg_amcheck', '-t', 'localhost.postgres.pg_catalog.pg_class' ],
+ [ 'pg_amcheck', '--table' => 'localhost.postgres.pg_catalog.pg_class' ],
2,
[qr/^$/],
[
$node->command_checks_all(
[
'pg_amcheck', '--no-strict-names',
- '-t', 'this.is.a.really.long.dotted.string'
+ '--table' => 'this.is.a.really.long.dotted.string'
],
2,
[qr/^$/],
'ungrammatical table names still draw errors under --no-strict-names');
$node->command_checks_all(
[
- 'pg_amcheck', '--no-strict-names', '-s',
- 'postgres.long.dotted.string'
+ 'pg_amcheck', '--no-strict-names',
+ '--schema' => 'postgres.long.dotted.string'
],
2,
[qr/^$/],
'ungrammatical schema names still draw errors under --no-strict-names');
$node->command_checks_all(
[
- 'pg_amcheck', '--no-strict-names', '-d',
- 'postgres.long.dotted.string'
+ 'pg_amcheck', '--no-strict-names',
+ '--database' => 'postgres.long.dotted.string'
],
2,
[qr/^$/],
# Likewise for exclusion patterns
$node->command_checks_all(
- [ 'pg_amcheck', '--no-strict-names', '-T', 'a.b.c.d' ],
+ [ 'pg_amcheck', '--no-strict-names', '--exclude-table' => 'a.b.c.d' ],
2,
[qr/^$/],
[
'ungrammatical table exclusions still draw errors under --no-strict-names'
);
$node->command_checks_all(
- [ 'pg_amcheck', '--no-strict-names', '-S', 'a.b.c' ],
+ [ 'pg_amcheck', '--no-strict-names', '--exclude-schema' => 'a.b.c' ],
2,
[qr/^$/],
[
'ungrammatical schema exclusions still draw errors under --no-strict-names'
);
$node->command_checks_all(
- [ 'pg_amcheck', '--no-strict-names', '-D', 'a.b' ],
+ [ 'pg_amcheck', '--no-strict-names', '--exclude-database' => 'a.b' ],
2,
[qr/^$/],
[
$node->command_checks_all(
[
'pg_amcheck', '--no-strict-names',
- '-t', 'no_such_table',
- '-t', 'no*such*table',
- '-i', 'no_such_index',
- '-i', 'no*such*index',
- '-r', 'no_such_relation',
- '-r', 'no*such*relation',
- '-d', 'no_such_database',
- '-d', 'no*such*database',
- '-r', 'none.none',
- '-r', 'none.none.none',
- '-r', 'postgres.none.none',
- '-r', 'postgres.pg_catalog.none',
- '-r', 'postgres.none.pg_class',
- '-t', 'postgres.pg_catalog.pg_class', # This exists
+ '--table' => 'no_such_table',
+ '--table' => 'no*such*table',
+ '--index' => 'no_such_index',
+ '--index' => 'no*such*index',
+ '--relation' => 'no_such_relation',
+ '--relation' => 'no*such*relation',
+ '--database' => 'no_such_database',
+ '--database' => 'no*such*database',
+ '--relation' => 'none.none',
+ '--relation' => 'none.none.none',
+ '--relation' => 'postgres.none.none',
+ '--relation' => 'postgres.pg_catalog.none',
+ '--relation' => 'postgres.none.pg_class',
+ '--table' => 'postgres.pg_catalog.pg_class', # This exists
],
0,
[qr/^$/],
));
$node->command_checks_all(
- [ 'pg_amcheck', '-d', 'regression_invalid' ],
+ [ 'pg_amcheck', '--database' => 'regression_invalid' ],
1,
[qr/^$/],
[
$node->command_checks_all(
[
- 'pg_amcheck', '-d', 'postgres', '-t', 'regression_invalid.public.foo',
+ 'pg_amcheck',
+ '--database' => 'postgres',
+ '--table' => 'regression_invalid.public.foo',
],
1,
[qr/^$/],
$node->command_checks_all(
[
- 'pg_amcheck', '-d',
- 'postgres', '--no-strict-names',
- '-t', 'template1.public.foo',
- '-t', 'another_db.public.foo',
- '-t', 'no_such_database.public.foo',
- '-i', 'template1.public.foo_idx',
- '-i', 'another_db.public.foo_idx',
- '-i', 'no_such_database.public.foo_idx',
+ 'pg_amcheck',
+ '--database' => 'postgres',
+ '--no-strict-names',
+ '--table' => 'template1.public.foo',
+ '--table' => 'another_db.public.foo',
+ '--table' => 'no_such_database.public.foo',
+ '--index' => 'template1.public.foo_idx',
+ '--index' => 'another_db.public.foo_idx',
+ '--index' => 'no_such_database.public.foo_idx',
],
1,
[qr/^$/],
# Check with only schema exclusion patterns
$node->command_checks_all(
[
- 'pg_amcheck', '--all', '--no-strict-names', '-S',
- 'public', '-S', 'pg_catalog', '-S',
- 'pg_toast', '-S', 'information_schema',
+ 'pg_amcheck',
+ '--all',
+ '--no-strict-names',
+ '--exclude-schema' => 'public',
+ '--exclude-schema' => 'pg_catalog',
+ '--exclude-schema' => 'pg_toast',
+ '--exclude-schema' => 'information_schema',
],
1,
[qr/^$/],
# Check with schema exclusion patterns overriding relation and schema inclusion patterns
$node->command_checks_all(
[
- 'pg_amcheck', '--all', '--no-strict-names', '-s',
- 'public', '-s', 'pg_catalog', '-s',
- 'pg_toast', '-s', 'information_schema', '-t',
- 'pg_catalog.pg_class', '-S*'
+ 'pg_amcheck',
+ '--all',
+ '--no-strict-names',
+ '--schema' => 'public',
+ '--schema' => 'pg_catalog',
+ '--schema' => 'pg_toast',
+ '--schema' => 'information_schema',
+ '--table' => 'pg_catalog.pg_class',
+ '--exclude-schema' => '*'
],
1,
[qr/^$/],
#
# Standard first arguments to PostgreSQL::Test::Utils functions
-my @cmd = ('pg_amcheck', '-p', $port);
+my @cmd = ('pg_amcheck', '--port' => $port);
# Regular expressions to match various expected output
my $no_output_re = qr/^$/;
# yet corrupted anything. As such, we expect no corruption and verify that
# none is reported
#
-$node->command_checks_all([ @cmd, '-d', 'db1', '-d', 'db2', '-d', 'db3' ],
- 0, [$no_output_re], [$no_output_re], 'pg_amcheck prior to corruption');
+$node->command_checks_all(
+ [
+ @cmd,
+ '--database' => 'db1',
+ '--database' => 'db2',
+ '--database' => 'db3'
+ ],
+ 0,
+ [$no_output_re],
+ [$no_output_re],
+ 'pg_amcheck prior to corruption');
# Perform the corruptions we planned above using only a single database restart.
#
'pg_amcheck all schemas, tables and indexes in database db1');
$node->command_checks_all(
- [ @cmd, '-d', 'db1', '-d', 'db2', '-d', 'db3' ],
+ [
+ @cmd,
+ '--database' => 'db1',
+ '--database' => 'db2',
+ '--database' => 'db3'
+ ],
2,
[
$index_missing_relation_fork_re, $line_pointer_corruption_re,
# complaint on stderr, but otherwise stderr should be quiet.
#
$node->command_checks_all(
- [ @cmd, '--all', '-s', 's1', '-i', 't1_btree' ],
+ [ @cmd, '--all', '--schema' => 's1', '--index' => 't1_btree' ],
2,
[$index_missing_relation_fork_re],
[
'pg_amcheck index s1.t1_btree reports missing main relation fork');
$node->command_checks_all(
- [ @cmd, '-d', 'db1', '-s', 's1', '-i', 't2_btree' ],
+ [
+ @cmd,
+ '--database' => 'db1',
+ '--schema' => 's1',
+ '--index' => 't2_btree'
+ ],
2,
[qr/.+/], # Any non-empty error message is acceptable
[$no_output_re],
# are quiet.
#
$node->command_checks_all(
- [ @cmd, '-t', 's1.*', '--no-dependent-indexes', 'db1' ],
+ [ @cmd, '--table' => 's1.*', '--no-dependent-indexes', 'db1' ],
0, [$no_output_re], [$no_output_re],
'pg_amcheck of db1.s1 excluding indexes');
# Checking db2.s1 should show table corruptions if indexes are excluded
#
$node->command_checks_all(
- [ @cmd, '-t', 's1.*', '--no-dependent-indexes', 'db2' ],
- 2, [$missing_file_re], [$no_output_re],
+ [ @cmd, '--table' => 's1.*', '--no-dependent-indexes', 'db2' ],
+ 2,
+ [$missing_file_re],
+ [$no_output_re],
'pg_amcheck of db2.s1 excluding indexes');
# In schema db1.s3, the tables and indexes are both corrupt. We should see
# corruption messages on stdout, and nothing on stderr.
#
$node->command_checks_all(
- [ @cmd, '-s', 's3', 'db1' ],
+ [ @cmd, '--schema' => 's3', 'db1' ],
2,
[
$index_missing_relation_fork_re, $line_pointer_corruption_re,
# In schema db1.s4, only toast tables are corrupt. Check that under default
# options the toast corruption is reported, but when excluding toast we get no
# error reports.
-$node->command_checks_all([ @cmd, '-s', 's4', 'db1' ],
+$node->command_checks_all([ @cmd, '--schema' => 's4', 'db1' ],
2, [$missing_file_re], [$no_output_re],
'pg_amcheck in schema s4 reports toast corruption');
$node->command_checks_all(
[
- @cmd, '--no-dependent-toast', '--exclude-toast-pointers', '-s', 's4',
+ @cmd,
+ '--no-dependent-toast',
+ '--exclude-toast-pointers',
+ '--schema' => 's4',
'db1'
],
0,
'pg_amcheck in schema s4 excluding toast reports no corruption');
# Check that no corruption is reported in schema db1.s5
-$node->command_checks_all([ @cmd, '-s', 's5', 'db1' ],
+$node->command_checks_all([ @cmd, '--schema' => 's5', 'db1' ],
0, [$no_output_re], [$no_output_re],
'pg_amcheck over schema s5 reports no corruption');
# the indexes, no corruption is reported about the schema.
#
$node->command_checks_all(
- [ @cmd, '-s', 's1', '-I', 't1_btree', '-I', 't2_btree', 'db1' ],
+ [
+ @cmd,
+ '--schema' => 's1',
+ '--exclude-index' => 't1_btree',
+ '--exclude-index' => 't2_btree',
+ 'db1'
+ ],
0,
[$no_output_re],
[$no_output_re],
# about the schema.
#
$node->command_checks_all(
- [ @cmd, '-t', 's1.*', '--no-dependent-indexes', 'db1' ],
+ [ @cmd, '--table' => 's1.*', '--no-dependent-indexes', 'db1' ],
0,
[$no_output_re],
[$no_output_re],
# tables that no corruption is reported.
#
$node->command_checks_all(
- [ @cmd, '-s', 's2', '-T', 't1', '-T', 't2', 'db1' ],
+ [
+ @cmd,
+ '--schema' => 's2',
+ '--exclude-table' => 't1',
+ '--exclude-table' => 't2',
+ 'db1'
+ ],
0,
[$no_output_re],
[$no_output_re],
# to avoid getting messages about corrupt tables or indexes.
#
command_fails_like(
- [ @cmd, '-s', 's5', '--startblock', 'junk', 'db1' ],
+ [ @cmd, '--schema' => 's5', '--startblock' => 'junk', 'db1' ],
qr/invalid start block/,
'pg_amcheck rejects garbage startblock');
command_fails_like(
- [ @cmd, '-s', 's5', '--endblock', '1234junk', 'db1' ],
+ [ @cmd, '--schema' => 's5', '--endblock' => '1234junk', 'db1' ],
qr/invalid end block/,
'pg_amcheck rejects garbage endblock');
command_fails_like(
- [ @cmd, '-s', 's5', '--startblock', '5', '--endblock', '4', 'db1' ],
+ [
+ @cmd,
+ '--schema' => 's5',
+ '--startblock' => '5',
+ '--endblock' => '4',
+ 'db1'
+ ],
qr/end block precedes start block/,
'pg_amcheck rejects invalid block range');
# arguments are handled sensibly.
#
$node->command_checks_all(
- [ @cmd, '-s', 's1', '-i', 't1_btree', '--parent-check', 'db1' ],
+ [
+ @cmd,
+ '--schema' => 's1',
+ '--index' => 't1_btree',
+ '--parent-check', 'db1'
+ ],
2,
[$index_missing_relation_fork_re],
[$no_output_re],
$node->command_checks_all(
[
- @cmd, '-s', 's1', '-i', 't1_btree', '--heapallindexed',
+ @cmd,
+ '--schema' => 's1',
+ '--index' => 't1_btree',
+ '--heapallindexed',
'--rootdescend', 'db1'
],
2,
'pg_amcheck smoke test --heapallindexed --rootdescend');
$node->command_checks_all(
- [ @cmd, '-d', 'db1', '-d', 'db2', '-d', 'db3', '-S', 's*' ],
- 0, [$no_output_re], [$no_output_re],
+ [
+ @cmd,
+ '--database' => 'db1',
+ '--database' => 'db2',
+ '--database' => 'db3',
+ '--exclude-schema' => 's*'
+ ],
+ 0,
+ [$no_output_re],
+ [$no_output_re],
'pg_amcheck excluding all corrupt schemas');
$node->command_checks_all(
[
- @cmd, '-s', 's1', '-i', 't1_btree', '--parent-check',
+ @cmd,
+ '--schema' => 's1',
+ '--index' => 't1_btree',
+ '--parent-check',
'--checkunique', 'db1'
],
2,
$node->command_checks_all(
[
- @cmd, '-s', 's1', '-i', 't1_btree', '--heapallindexed',
+ @cmd,
+ '--schema' => 's1',
+ '--index' => 't1_btree',
+ '--heapallindexed',
'--rootdescend', '--checkunique', 'db1'
],
2,
$node->command_checks_all(
[
- @cmd, '--checkunique', '-d', 'db1', '-d', 'db2',
- '-d', 'db3', '-S', 's*'
+ @cmd,
+ '--checkunique',
+ '--database' => 'db1',
+ '--database' => 'db2',
+ '--database' => 'db3',
+ '--exclude-schema' => 's*'
],
0,
[$no_output_re],
# Check that pg_amcheck runs against the uncorrupted table without error.
$node->command_ok(
- [ 'pg_amcheck', '-p', $port, 'postgres' ],
+ [ 'pg_amcheck', '--port' => $port, 'postgres' ],
'pg_amcheck test table, prior to corruption');
# Check that pg_amcheck runs against the uncorrupted table and index without error.
-$node->command_ok([ 'pg_amcheck', '-p', $port, 'postgres' ],
+$node->command_ok(
+ [ 'pg_amcheck', '--port' => $port, 'postgres' ],
'pg_amcheck test table and index, prior to corruption');
$node->stop;
# Run pg_amcheck against the corrupt table with epoch=0, comparing actual
# corruption messages against the expected messages
$node->command_checks_all(
- [ 'pg_amcheck', '--no-dependent-indexes', '-p', $port, 'postgres' ],
+ [ 'pg_amcheck', '--no-dependent-indexes', '--port' => $port, 'postgres' ],
2, [@expected], [], 'Expected corruption message output');
$node->safe_psql(
'postgres', qq(
));
# We have not yet broken the index, so we should get no corruption
-$node->command_like([ 'pg_amcheck', '-p', $node->port, 'postgres' ],
+$node->command_like([ 'pg_amcheck', '--port' => $node->port, 'postgres' ],
qr/^$/,
'pg_amcheck all schemas, tables and indexes reports no corruption');
# Index corruption should now be reported
$node->command_checks_all(
- [ 'pg_amcheck', '-p', $node->port, 'postgres' ],
+ [ 'pg_amcheck', '--port' => $node->port, 'postgres' ],
2,
[qr/item order invariant violated for index "fickleidx"/],
[],
# We should get no corruptions
$node->command_like(
- [ 'pg_amcheck', '--checkunique', '-p', $node->port, 'postgres' ],
+ [ 'pg_amcheck', '--checkunique', '--port' => $node->port, 'postgres' ],
qr/^$/,
'pg_amcheck all schemas, tables and indexes reports no corruption');
# Unique index corruption should now be reported
$node->command_checks_all(
- [ 'pg_amcheck', '--checkunique', '-p', $node->port, 'postgres' ],
+ [ 'pg_amcheck', '--checkunique', '--port' => $node->port, 'postgres' ],
2,
[qr/index uniqueness is violated for index "bttest_unique_idx"/],
[],
# Initialize node without replication settings
$node->init(
extra => ['--data-checksums'],
- auth_extra => [ '--create-role', 'backupuser' ]);
+ auth_extra => [ '--create-role' => 'backupuser' ]);
$node->start;
my $pgdata = $node->data_dir;
# Sanity checks for options
$node->command_fails_like(
- [ 'pg_basebackup', '-D', "$tempdir/backup", '--compress', 'none:1' ],
+ [
+ 'pg_basebackup',
+ '--pgdata' => "$tempdir/backup",
+ '--compress' => 'none:1'
+ ],
qr/\Qcompression algorithm "none" does not accept a compression level/,
'failure if method "none" specified with compression level');
$node->command_fails_like(
- [ 'pg_basebackup', '-D', "$tempdir/backup", '--compress', 'none+' ],
+ [
+ 'pg_basebackup',
+ '--pgdata' => "$tempdir/backup",
+ '--compress' => 'none+'
+ ],
qr/\Qunrecognized compression algorithm: "none+"/,
'failure on incorrect separator to define compression level');
$node->reload;
$node->command_fails(
- [ @pg_basebackup_defs, '-D', "$tempdir/backup" ],
+ [ @pg_basebackup_defs, '--pgdata' => "$tempdir/backup" ],
'pg_basebackup fails because of WAL configuration');
ok(!-d "$tempdir/backup", 'backup directory was cleaned up');
or BAIL_OUT("unable to create $tempdir/backup");
append_to_file("$tempdir/backup/dir-not-empty.txt", "Some data");
-$node->command_fails([ @pg_basebackup_defs, '-D', "$tempdir/backup", '-n' ],
+$node->command_fails(
+ [ @pg_basebackup_defs, '--pgdata' => "$tempdir/backup", '-n' ],
'failing run with no-clean option');
ok(-d "$tempdir/backup", 'backup directory was created and left behind');
my $sfail = quotemeta($server_fails . $cft->[1]);
$node->command_fails_like(
[
- 'pg_basebackup', '-D',
- "$tempdir/backup", '--compress',
- $cft->[0]
+ 'pg_basebackup',
+ '--pgdata' => "$tempdir/backup",
+ '--compress' => $cft->[0],
],
qr/$cfail/,
'client ' . $cft->[2]);
$node->command_fails_like(
[
- 'pg_basebackup', '-D',
- "$tempdir/backup", '--compress',
- 'server-' . $cft->[0]
+ 'pg_basebackup',
+ '--pgdata' => "$tempdir/backup",
+ '--compress' => 'server-' . $cft->[0],
],
qr/$sfail/,
'server ' . $cft->[2]);
# Run base backup.
$node->command_ok(
- [ @pg_basebackup_defs, '-D', "$tempdir/backup", '-X', 'none' ],
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backup",
+ '--wal-method' => 'none'
+ ],
'pg_basebackup runs');
ok(-f "$tempdir/backup/PG_VERSION", 'backup was created');
ok(-f "$tempdir/backup/backup_manifest", 'backup manifest included');
$node->command_ok(
[
- @pg_basebackup_defs, '-D',
- "$tempdir/backup2", '--no-manifest',
- '--waldir', "$tempdir/xlog2"
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backup2",
+ '--no-manifest',
+ '--waldir' => "$tempdir/xlog2"
],
'separate xlog directory');
ok(-f "$tempdir/backup2/PG_VERSION", 'backup was created');
rmtree("$tempdir/backup2");
rmtree("$tempdir/xlog2");
-$node->command_ok([ @pg_basebackup_defs, '-D', "$tempdir/tarbackup", '-Ft' ],
+$node->command_ok(
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/tarbackup",
+ '--format' => 'tar'
+ ],
'tar format');
ok(-f "$tempdir/tarbackup/base.tar", 'backup tar was created');
rmtree("$tempdir/tarbackup");
$node->command_fails(
- [ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp', "-T=/foo" ],
- '-T with empty old directory fails');
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backup_foo",
+ '--format' => 'plain',
+ '--tablespace-mapping' => '=/foo'
+ ],
+ '--tablespace-mapping with empty old directory fails');
$node->command_fails(
- [ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp', "-T/foo=" ],
- '-T with empty new directory fails');
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backup_foo",
+ '--format' => 'plain',
+ '--tablespace-mapping' => '/foo='
+ ],
+ '--tablespace-mapping with empty new directory fails');
$node->command_fails(
[
- @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp',
- "-T/foo=/bar=/baz"
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backup_foo",
+ '--format' => 'plain',
+ '--tablespace-mapping' => '/foo=/bar=/baz'
],
- '-T with multiple = fails');
+ '--tablespace-mapping with multiple = fails');
$node->command_fails(
- [ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp', "-Tfoo=/bar" ],
- '-T with old directory not absolute fails');
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backup_foo",
+ '--format' => 'plain',
+ '--tablespace-mapping' => 'foo=/bar'
+ ],
+ '--tablespace-mapping with old directory not absolute fails');
$node->command_fails(
- [ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp', "-T/foo=bar" ],
- '-T with new directory not absolute fails');
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backup_foo",
+ '--format' => 'plain',
+ '--tablespace-mapping' => '/foo=bar'
+ ],
+ '--tablespace-mapping with new directory not absolute fails');
$node->command_fails(
- [ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp', "-Tfoo" ],
- '-T with invalid format fails');
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backup_foo",
+ '--format' => 'plain',
+ '--tablespace-mapping' => 'foo'
+ ],
+ '--tablespace-mapping with invalid format fails');
my $superlongname = "superlongname_" . ("x" x 100);
# Tar format doesn't support filenames longer than 100 bytes.
or die "unable to create file $superlongpath";
close $file;
$node->command_fails(
- [ @pg_basebackup_defs, '-D', "$tempdir/tarbackup_l1", '-Ft' ],
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/tarbackup_l1",
+ '--format' => 'tar'
+ ],
'pg_basebackup tar with long name fails');
unlink "$superlongpath";
}
$node->safe_psql('postgres',
"CREATE TABLE test1 (a int) TABLESPACE tblspc1;"
. "INSERT INTO test1 VALUES (1234);");
-$node->backup('tarbackup2', backup_options => ['-Ft']);
+$node->backup('tarbackup2', backup_options => [ '--format' => 'tar' ]);
# empty test1, just so that it's different from the to-be-restored data
$node->safe_psql('postgres', "TRUNCATE TABLE test1;");
}
$node->command_fails(
- [ @pg_basebackup_defs, '-D', "$tempdir/backup1", '-Fp' ],
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backup1",
+ '--format' => 'plain'
+ ],
'plain format with tablespaces fails without tablespace mapping');
$node->command_ok(
[
- @pg_basebackup_defs, '-D',
- "$tempdir/backup1", '-Fp',
- "-T$realTsDir=$tempdir/tbackup/tblspc1",
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backup1",
+ '--format' => 'plain',
+ '--tablespace-mapping' => "$realTsDir=$tempdir/tbackup/tblspc1",
],
'plain format with tablespaces succeeds with tablespace mapping');
ok(-d "$tempdir/tbackup/tblspc1", 'tablespace was relocated');
$realTsDir =~ s/=/\\=/;
$node->command_ok(
[
- @pg_basebackup_defs, '-D',
- "$tempdir/backup3", '-Fp',
- "-T$realTsDir=$tempdir/tbackup/tbl\\=spc2",
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backup3",
+ '--format' => 'plain',
+ '--tablespace-mapping' => "$realTsDir=$tempdir/tbackup/tbl\\=spc2",
],
'mapping tablespace with = sign in path');
ok(-d "$tempdir/tbackup/tbl=spc2", 'tablespace with = sign was relocated');
$node->safe_psql('postgres',
"CREATE TABLESPACE tblspc3 LOCATION '$realTsDir';");
$node->command_ok(
- [ @pg_basebackup_defs, '-D', "$tempdir/tarbackup_l3", '-Ft' ],
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/tarbackup_l3",
+ '--format' => 'tar'
+ ],
'pg_basebackup tar with long symlink target');
$node->safe_psql('postgres', "DROP TABLESPACE tblspc3;");
rmtree("$tempdir/tarbackup_l3");
-$node->command_ok([ @pg_basebackup_defs, '-D', "$tempdir/backupR", '-R' ],
- 'pg_basebackup -R runs');
+$node->command_ok(
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backupR",
+ '--write-recovery-conf'
+ ],
+ 'pg_basebackup --write-recovery-conf runs');
ok(-f "$tempdir/backupR/postgresql.auto.conf", 'postgresql.auto.conf exists');
ok(-f "$tempdir/backupR/standby.signal", 'standby.signal was created');
my $recovery_conf = slurp_file "$tempdir/backupR/postgresql.auto.conf";
qr/^primary_conninfo = '.*port=$port.*'\n/m,
'postgresql.auto.conf sets primary_conninfo');
-$node->command_ok(
- [ @pg_basebackup_defs, '-D', "$tempdir/backupxd" ],
+$node->command_ok([ @pg_basebackup_defs, '--pgdata' => "$tempdir/backupxd" ],
'pg_basebackup runs in default xlog mode');
ok(grep(/^[0-9A-F]{24}$/, slurp_dir("$tempdir/backupxd/pg_wal")),
'WAL files copied');
rmtree("$tempdir/backupxd");
$node->command_ok(
- [ @pg_basebackup_defs, '-D', "$tempdir/backupxf", '-X', 'fetch' ],
- 'pg_basebackup -X fetch runs');
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backupxf",
+ '--wal-method' => 'fetch'
+ ],
+ 'pg_basebackup --wal-method fetch runs');
ok(grep(/^[0-9A-F]{24}$/, slurp_dir("$tempdir/backupxf/pg_wal")),
'WAL files copied');
rmtree("$tempdir/backupxf");
$node->command_ok(
- [ @pg_basebackup_defs, '-D', "$tempdir/backupxs", '-X', 'stream' ],
- 'pg_basebackup -X stream runs');
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backupxs",
+ '--wal-method' => 'stream'
+ ],
+ 'pg_basebackup --wal-method stream runs');
ok(grep(/^[0-9A-F]{24}$/, slurp_dir("$tempdir/backupxs/pg_wal")),
'WAL files copied');
rmtree("$tempdir/backupxs");
$node->command_ok(
[
- @pg_basebackup_defs, '-D', "$tempdir/backupxst", '-X', 'stream',
- '-Ft'
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backupxst",
+ '--wal-method' => 'stream',
+ '--format' => 'tar'
],
- 'pg_basebackup -X stream runs in tar mode');
+ 'pg_basebackup --wal-method stream runs in tar mode');
ok(-f "$tempdir/backupxst/pg_wal.tar", "tar file was created");
rmtree("$tempdir/backupxst");
$node->command_ok(
[
- @pg_basebackup_defs, '-D',
- "$tempdir/backupnoslot", '-X',
- 'stream', '--no-slot'
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backupnoslot",
+ '--wal-method' => 'stream',
+ '--no-slot'
],
- 'pg_basebackup -X stream runs with --no-slot');
+ 'pg_basebackup --wal-method stream runs with --no-slot');
rmtree("$tempdir/backupnoslot");
$node->command_ok(
- [ @pg_basebackup_defs, '-D', "$tempdir/backupxf", '-X', 'fetch' ],
- 'pg_basebackup -X fetch runs');
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backupxf",
+ '--wal-method' => 'fetch'
+ ],
+ 'pg_basebackup --wal-method fetch runs');
$node->command_fails_like(
- [ @pg_basebackup_defs, '--target', 'blackhole' ],
+ [ @pg_basebackup_defs, '--target' => 'blackhole' ],
qr/WAL cannot be streamed when a backup target is specified/,
- 'backup target requires -X');
+ 'backup target requires --wal-method');
$node->command_fails_like(
- [ @pg_basebackup_defs, '--target', 'blackhole', '-X', 'stream' ],
+ [
+ @pg_basebackup_defs,
+ '--target' => 'blackhole',
+ '--wal-method' => 'stream'
+ ],
qr/WAL cannot be streamed when a backup target is specified/,
- 'backup target requires -X other than -X stream');
+ 'backup target requires --wal-method other than --wal-method stream');
$node->command_fails_like(
- [ @pg_basebackup_defs, '--target', 'bogus', '-X', 'none' ],
+ [ @pg_basebackup_defs, '--target' => 'bogus', '--wal-method' => 'none' ],
qr/unrecognized target/,
'backup target unrecognized');
$node->command_fails_like(
[
- @pg_basebackup_defs, '--target', 'blackhole', '-X',
- 'none', '-D', "$tempdir/blackhole"
+ @pg_basebackup_defs,
+ '--target' => 'blackhole',
+ '--wal-method' => 'none',
+ '--pgdata' => "$tempdir/blackhole"
],
qr/cannot specify both output directory and backup target/,
'backup target and output directory');
$node->command_fails_like(
- [ @pg_basebackup_defs, '--target', 'blackhole', '-X', 'none', '-Ft' ],
+ [
+ @pg_basebackup_defs,
+ '--target' => 'blackhole',
+ '--wal-method' => 'none',
+ '--format' => 'tar'
+ ],
qr/cannot specify both format and backup target/,
'backup target and output directory');
$node->command_ok(
- [ @pg_basebackup_defs, '--target', 'blackhole', '-X', 'none' ],
+ [
+ @pg_basebackup_defs,
+ '--target' => 'blackhole',
+ '--wal-method' => 'none'
+ ],
'backup target blackhole');
$node->command_ok(
[
- @pg_basebackup_defs, '--target',
- "server:$tempdir/backuponserver", '-X',
- 'none'
+ @pg_basebackup_defs,
+ '--target' => "server:$tempdir/backuponserver",
+ '--wal-method' => 'none'
],
'backup target server');
ok(-f "$tempdir/backuponserver/base.tar", 'backup tar was created');
'create backup user');
$node->command_ok(
[
- @pg_basebackup_defs, '-U', 'backupuser', '--target',
- "server:$tempdir/backuponserver",
- '-X', 'none'
+ @pg_basebackup_defs,
+ '--username' => 'backupuser',
+ '--target' => "server:$tempdir/backuponserver",
+ '--wal-method' => 'none'
],
'backup target server');
ok( -f "$tempdir/backuponserver/base.tar",
$node->command_fails(
[
- @pg_basebackup_defs, '-D',
- "$tempdir/backupxs_sl_fail", '-X',
- 'stream', '-S',
- 'slot0'
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backupxs_sl_fail",
+ '--wal-method' => 'stream',
+ '--slot' => 'slot0'
],
'pg_basebackup fails with nonexistent replication slot');
$node->command_fails(
- [ @pg_basebackup_defs, '-D', "$tempdir/backupxs_slot", '-C' ],
- 'pg_basebackup -C fails without slot name');
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backupxs_slot",
+ '--create-slot'
+ ],
+ 'pg_basebackup --create-slot fails without slot name');
$node->command_fails(
[
- @pg_basebackup_defs, '-D',
- "$tempdir/backupxs_slot", '-C',
- '-S', 'slot0',
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backupxs_slot",
+ '--create-slot',
+ '--slot' => 'slot0',
'--no-slot'
],
- 'pg_basebackup fails with -C -S --no-slot');
+ 'pg_basebackup fails with --create-slot --slot --no-slot');
$node->command_fails_like(
[
- @pg_basebackup_defs, '--target', 'blackhole', '-D',
- "$tempdir/blackhole"
+ @pg_basebackup_defs,
+ '--target' => 'blackhole',
+ '--pgdata' => "$tempdir/blackhole"
],
qr/cannot specify both output directory and backup target/,
'backup target and output directory');
$node->command_ok(
- [ @pg_basebackup_defs, '-D', "$tempdir/backuptr/co", '-X', 'none' ],
- 'pg_basebackup -X fetch runs');
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backuptr/co",
+ '--wal-method' => 'none'
+ ],
+ 'pg_basebackup --wal-method none runs');
$node->command_fails(
[
- @pg_basebackup_defs, '-D',
- "$tempdir/backupxs_sl_fail", '-X',
- 'stream', '-S',
- 'slot0'
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backupxs_sl_fail",
+ '--wal-method' => 'stream',
+ '--slot' => 'slot0'
],
'pg_basebackup fails with nonexistent replication slot');
$node->command_fails(
- [ @pg_basebackup_defs, '-D', "$tempdir/backupxs_slot", '-C' ],
- 'pg_basebackup -C fails without slot name');
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backupxs_slot",
+ '--create-slot'
+ ],
+ 'pg_basebackup --create-slot fails without slot name');
$node->command_fails(
[
- @pg_basebackup_defs, '-D',
- "$tempdir/backupxs_slot", '-C',
- '-S', 'slot0',
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backupxs_slot",
+ '--create-slot',
+ '--slot' => 'slot0',
'--no-slot'
],
- 'pg_basebackup fails with -C -S --no-slot');
+ 'pg_basebackup fails with --create-slot --slot --no-slot');
$node->command_ok(
[
- @pg_basebackup_defs, '-D',
- "$tempdir/backupxs_slot", '-C',
- '-S', 'slot0'
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backupxs_slot",
+ '--create-slot',
+ '--slot' => 'slot0'
],
- 'pg_basebackup -C runs');
+ 'pg_basebackup --create-slot runs');
rmtree("$tempdir/backupxs_slot");
is( $node->safe_psql(
$node->command_fails(
[
- @pg_basebackup_defs, '-D',
- "$tempdir/backupxs_slot1", '-C',
- '-S', 'slot0'
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backupxs_slot1",
+ '--create-slot',
+ '--slot' => 'slot0'
],
- 'pg_basebackup fails with -C -S and a previously existing slot');
+ 'pg_basebackup fails with --create-slot --slot and a previously existing slot'
+);
$node->safe_psql('postgres',
q{SELECT * FROM pg_create_physical_replication_slot('slot1')});
is($lsn, '', 'restart LSN of new slot is null');
$node->command_fails(
[
- @pg_basebackup_defs, '-D', "$tempdir/fail", '-S',
- 'slot1', '-X', 'none'
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/fail",
+ '--slot' => 'slot1',
+ '--wal-method' => 'none'
],
'pg_basebackup with replication slot fails without WAL streaming');
$node->command_ok(
[
- @pg_basebackup_defs, '-D', "$tempdir/backupxs_sl", '-X',
- 'stream', '-S', 'slot1'
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backupxs_sl",
+ '--wal-method' => 'stream',
+ '--slot' => 'slot1'
],
- 'pg_basebackup -X stream with replication slot runs');
+ 'pg_basebackup --wal-method stream with replication slot runs');
$lsn = $node->safe_psql('postgres',
q{SELECT restart_lsn FROM pg_replication_slots WHERE slot_name = 'slot1'}
);
$node->command_ok(
[
- @pg_basebackup_defs, '-D', "$tempdir/backupxs_sl_R", '-X',
- 'stream', '-S', 'slot1', '-R',
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backupxs_sl_R",
+ '--wal-method' => 'stream',
+ '--slot' => 'slot1',
+ '--write-recovery-conf',
],
- 'pg_basebackup with replication slot and -R runs');
+ 'pg_basebackup with replication slot and --write-recovery-conf runs');
like(
slurp_file("$tempdir/backupxs_sl_R/postgresql.auto.conf"),
qr/^primary_slot_name = 'slot1'\n/m,
$node->command_ok(
[
- @pg_basebackup_defs, '-D', "$tempdir/backup_dbname_R", '-X',
- 'stream', '-d', "dbname=db1", '-R',
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backup_dbname_R",
+ '--wal-method' => 'stream',
+ '--dbname' => "dbname=db1",
+ '--write-recovery-conf',
],
- 'pg_basebackup with dbname and -R runs');
+ 'pg_basebackup with dbname and --write-recovery-conf runs');
like(slurp_file("$tempdir/backup_dbname_R/postgresql.auto.conf"),
qr/dbname=db1/m, 'recovery conf file sets dbname');
$node->start;
$node->command_checks_all(
- [ @pg_basebackup_defs, '-D', "$tempdir/backup_corrupt" ],
+ [ @pg_basebackup_defs, '--pgdata' => "$tempdir/backup_corrupt" ],
1,
[qr{^$}],
[qr/^WARNING.*checksum verification failed/s],
$node->start;
$node->command_checks_all(
- [ @pg_basebackup_defs, '-D', "$tempdir/backup_corrupt2" ],
+ [ @pg_basebackup_defs, '--pgdata' => "$tempdir/backup_corrupt2" ],
1,
[qr{^$}],
[qr/^WARNING.*further.*failures.*will.not.be.reported/s],
$node->start;
$node->command_checks_all(
- [ @pg_basebackup_defs, '-D', "$tempdir/backup_corrupt3" ],
+ [ @pg_basebackup_defs, '--pgdata' => "$tempdir/backup_corrupt3" ],
1,
[qr{^$}],
[qr/^WARNING.*7 total checksum verification failures/s],
# do not verify checksums, should return ok
$node->command_ok(
[
- @pg_basebackup_defs, '-D',
- "$tempdir/backup_corrupt4", '--no-verify-checksums',
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backup_corrupt4",
+ '--no-verify-checksums',
],
- 'pg_basebackup with -k does not report checksum mismatch');
+ 'pg_basebackup with --no-verify-checksums does not report checksum mismatch'
+);
rmtree("$tempdir/backup_corrupt4");
$node->command_ok(
[
- @pg_basebackup_defs, '-D',
- "$tempdir/backup_gzip", '--compress',
- '1', '--format',
- 't'
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backup_gzip",
+ '--compress' => '1',
+ '--format' => 't'
],
'pg_basebackup with --compress');
$node->command_ok(
[
- @pg_basebackup_defs, '-D',
- "$tempdir/backup_gzip2", '--gzip',
- '--format', 't'
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backup_gzip2",
+ '--gzip',
+ '--format' => 't'
],
'pg_basebackup with --gzip');
$node->command_ok(
[
- @pg_basebackup_defs, '-D',
- "$tempdir/backup_gzip3", '--compress',
- 'gzip:1', '--format',
- 't'
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backup_gzip3",
+ '--compress' => 'gzip:1',
+ '--format' => 't'
],
'pg_basebackup with --compress=gzip:1');
my $sigchld_bb = IPC::Run::start(
[
@pg_basebackup_defs, '--wal-method=stream',
- '-D', "$tempdir/sigchld",
- '--max-rate=32', '-d',
- $node->connstr('postgres')
- ],
- '<',
- \$sigchld_bb_stdin,
- '>',
- \$sigchld_bb_stdout,
- '2>',
- \$sigchld_bb_stderr,
+ '--pgdata' => "$tempdir/sigchld",
+ '--max-rate' => '32',
+ '--dbname' => $node->connstr('postgres')
+ ],
+ '<' => \$sigchld_bb_stdin,
+ '>' => \$sigchld_bb_stdout,
+ '2>' => \$sigchld_bb_stderr,
$sigchld_bb_timeout);
is( $node->poll_query_until(
$node2->command_fails_like(
[
- @pg_basebackup_defs, '-D',
- "$tempdir" . '/diff_sysid', '--incremental',
- "$backupdir" . '/backup_manifest'
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/diff_sysid",
+ '--incremental' => "$backupdir/backup_manifest",
],
qr/system identifier in backup manifest is .*, but database system identifier is/,
"pg_basebackup fails with different database system manifest");
# to keep test times reasonable. Using @pg_basebackup_defs as the first
# element of the array passed to IPC::Run interpolate the array (as it is
# not a reference to an array)...
-my @pg_basebackup_defs = ('pg_basebackup', '--no-sync', '-cfast');
+my @pg_basebackup_defs =
+ ('pg_basebackup', '--no-sync', '--checkpoint' => 'fast');
# Set up an instance.
my $node = PostgreSQL::Test::Cluster->new('main');
# Back it up.
my $backupdir = $tempdir . '/backup';
$node->command_ok(
- [ @pg_basebackup_defs, '-D', $backupdir, '-Ft', '-X', 'none' ],
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => $backupdir,
+ '--format' => 'tar',
+ '--wal-method' => 'none'
+ ],
'pg_basebackup runs');
# Make sure we got base.tar and one tablespace.
$primary->command_fails(['pg_receivewal'],
'pg_receivewal needs target directory specified');
$primary->command_fails(
- [ 'pg_receivewal', '-D', $stream_dir, '--create-slot', '--drop-slot' ],
+ [
+ 'pg_receivewal',
+ '--directory' => $stream_dir,
+ '--create-slot',
+ '--drop-slot',
+ ],
'failure if both --create-slot and --drop-slot specified');
$primary->command_fails(
- [ 'pg_receivewal', '-D', $stream_dir, '--create-slot' ],
+ [ 'pg_receivewal', '--directory' => $stream_dir, '--create-slot' ],
'failure if --create-slot specified without --slot');
$primary->command_fails(
- [ 'pg_receivewal', '-D', $stream_dir, '--synchronous', '--no-sync' ],
+ [
+ 'pg_receivewal',
+ '--directory' => $stream_dir,
+ '--synchronous',
+ '--no-sync',
+ ],
'failure if --synchronous specified with --no-sync');
$primary->command_fails_like(
- [ 'pg_receivewal', '-D', $stream_dir, '--compress', 'none:1', ],
+ [
+ 'pg_receivewal',
+ '--directory' => $stream_dir,
+ '--compress' => 'none:1',
+ ],
qr/\Qpg_receivewal: error: invalid compression specification: compression algorithm "none" does not accept a compression level/,
'failure if --compress none:N (where N > 0)');
# Slot creation and drop
my $slot_name = 'test';
$primary->command_ok(
- [ 'pg_receivewal', '--slot', $slot_name, '--create-slot' ],
+ [ 'pg_receivewal', '--slot' => $slot_name, '--create-slot' ],
'creating a replication slot');
my $slot = $primary->slot($slot_name);
is($slot->{'slot_type'}, 'physical', 'physical replication slot was created');
is($slot->{'restart_lsn'}, '', 'restart LSN of new slot is null');
-$primary->command_ok([ 'pg_receivewal', '--slot', $slot_name, '--drop-slot' ],
+$primary->command_ok(
+ [ 'pg_receivewal', '--slot' => $slot_name, '--drop-slot' ],
'dropping a replication slot');
is($primary->slot($slot_name)->{'slot_type'},
'', 'replication slot was removed');
# compression involved.
$primary->command_ok(
[
- 'pg_receivewal', '-D', $stream_dir, '--verbose',
- '--endpos', $nextlsn, '--synchronous', '--no-loop'
+ 'pg_receivewal',
+ '--directory' => $stream_dir,
+ '--verbose',
+ '--endpos' => $nextlsn,
+ '--synchronous',
+ '--no-loop',
],
'streaming some WAL with --synchronous');
$primary->command_ok(
[
- 'pg_receivewal', '-D', $stream_dir, '--verbose',
- '--endpos', $nextlsn, '--compress', 'gzip:1',
+ 'pg_receivewal',
+ '--directory' => $stream_dir,
+ '--verbose',
+ '--endpos' => $nextlsn,
+ '--compress' => 'gzip:1',
'--no-loop'
],
"streaming some WAL using ZLIB compression");
# Stream up to the given position.
$primary->command_ok(
[
- 'pg_receivewal', '-D', $stream_dir, '--verbose',
- '--endpos', $nextlsn, '--no-loop', '--compress',
- 'lz4'
+ 'pg_receivewal',
+ '--directory' => $stream_dir,
+ '--verbose',
+ '--endpos' => $nextlsn,
+ '--no-loop',
+ '--compress' => 'lz4'
],
'streaming some WAL using --compress=lz4');
$primary->psql('postgres', 'INSERT INTO test_table VALUES (4);');
$primary->command_ok(
[
- 'pg_receivewal', '-D', $stream_dir, '--verbose',
- '--endpos', $nextlsn, '--no-loop'
+ 'pg_receivewal',
+ '--directory' => $stream_dir,
+ '--verbose',
+ '--endpos' => $nextlsn,
+ '--no-loop'
],
"streaming some WAL");
# Check case where the slot does not exist.
$primary->command_fails_like(
[
- 'pg_receivewal', '-D', $slot_dir, '--slot',
- 'nonexistentslot', '-n', '--no-sync', '--verbose',
- '--endpos', $nextlsn
+ 'pg_receivewal',
+ '--directory' => $slot_dir,
+ '--slot' => 'nonexistentslot',
+ '--no-loop',
+ '--no-sync',
+ '--verbose',
+ '--endpos' => $nextlsn
],
qr/pg_receivewal: error: replication slot "nonexistentslot" does not exist/,
'pg_receivewal fails with non-existing slot');
$primary->command_ok(
[
- 'pg_receivewal', '-D', $slot_dir, '--slot',
- $slot_name, '-n', '--no-sync', '--verbose',
- '--endpos', $nextlsn
+ 'pg_receivewal',
+ '--directory' => $slot_dir,
+ '--slot' => $slot_name,
+ '--no-loop',
+ '--no-sync',
+ '--verbose',
+ '--endpos' => $nextlsn
],
"WAL streamed from the slot's restart_lsn");
ok(-e "$slot_dir/$walfile_streamed",
$standby->command_ok(
[
- 'pg_receivewal', '-D', $timeline_dir, '--verbose',
- '--endpos', $nextlsn, '--slot', $archive_slot,
- '--no-sync', '-n'
+ 'pg_receivewal',
+ '--directory' => $timeline_dir,
+ '--verbose',
+ '--endpos' => $nextlsn,
+ '--slot' => $archive_slot,
+ '--no-sync',
+ '--no-loop'
],
"Stream some wal after promoting, resuming from the slot's position");
ok(-e "$timeline_dir/$walfile_before_promotion",
$node->start;
$node->command_fails(['pg_recvlogical'], 'pg_recvlogical needs a slot name');
-$node->command_fails([ 'pg_recvlogical', '-S', 'test' ],
+$node->command_fails(
+ [ 'pg_recvlogical', '--slot' => 'test' ],
'pg_recvlogical needs a database');
-$node->command_fails([ 'pg_recvlogical', '-S', 'test', '-d', 'postgres' ],
+$node->command_fails(
+ [ 'pg_recvlogical', '--slot' => 'test', '--dbname' => 'postgres' ],
'pg_recvlogical needs an action');
$node->command_fails(
[
- 'pg_recvlogical', '-S',
- 'test', '-d',
- $node->connstr('postgres'), '--start'
+ 'pg_recvlogical',
+ '--slot' => 'test',
+ '--dbname' => $node->connstr('postgres'),
+ '--start',
],
'no destination file');
$node->command_ok(
[
- 'pg_recvlogical', '-S',
- 'test', '-d',
- $node->connstr('postgres'), '--create-slot'
+ 'pg_recvlogical',
+ '--slot' => 'test',
+ '--dbname' => $node->connstr('postgres'),
+ '--create-slot',
],
'slot created');
$node->command_ok(
[
- 'pg_recvlogical', '-S', 'test', '-d', $node->connstr('postgres'),
- '--start', '--endpos', "$nextlsn", '--no-loop', '-f', '-'
+ 'pg_recvlogical',
+ '--slot' => 'test',
+ '--dbname' => $node->connstr('postgres'),
+ '--start',
+ '--endpos' => $nextlsn,
+ '--no-loop',
+ '--file' => '-',
],
'replayed a transaction');
$node->command_ok(
[
- 'pg_recvlogical', '-S',
- 'test', '-d',
- $node->connstr('postgres'), '--drop-slot'
+ 'pg_recvlogical',
+ '--slot' => 'test',
+ '--dbname' => $node->connstr('postgres'),
+ '--drop-slot'
],
'slot dropped');
#test with two-phase option enabled
$node->command_ok(
[
- 'pg_recvlogical', '-S',
- 'test', '-d',
- $node->connstr('postgres'), '--create-slot',
- '--two-phase'
+ 'pg_recvlogical',
+ '--slot' => 'test',
+ '--dbname' => $node->connstr('postgres'),
+ '--create-slot',
+ '--two-phase',
],
'slot with two-phase created');
$node->command_fails(
[
- 'pg_recvlogical', '-S',
- 'test', '-d',
- $node->connstr('postgres'), '--start',
- '--endpos', "$nextlsn",
+ 'pg_recvlogical',
+ '--slot' => 'test',
+ '--dbname' => $node->connstr('postgres'),
+ '--start',
+ '--endpos' => $nextlsn,
'--two-phase', '--no-loop',
- '-f', '-'
+ '--file' => '-',
],
'incorrect usage');
$node->command_ok(
[
- 'pg_recvlogical', '-S', 'test', '-d', $node->connstr('postgres'),
- '--start', '--endpos', "$nextlsn", '--no-loop', '-f', '-'
+ 'pg_recvlogical',
+ '--slot' => 'test',
+ '--dbname' => $node->connstr('postgres'),
+ '--start',
+ '--endpos' => $nextlsn,
+ '--no-loop',
+ '--file' => '-',
],
'replayed a two-phase transaction');
command_fails(['pg_createsubscriber'],
'no subscriber data directory specified');
command_fails(
- [ 'pg_createsubscriber', '--pgdata', $datadir ],
+ [ 'pg_createsubscriber', '--pgdata' => $datadir ],
'no publisher connection string specified');
command_fails(
[
- 'pg_createsubscriber', '--verbose',
- '--pgdata', $datadir,
- '--publisher-server', 'port=5432'
+ 'pg_createsubscriber',
+ '--verbose',
+ '--pgdata' => $datadir,
+ '--publisher-server' => 'port=5432',
],
'no database name specified');
command_fails(
[
- 'pg_createsubscriber', '--verbose',
- '--pgdata', $datadir,
- '--publisher-server', 'port=5432',
- '--database', 'pg1',
- '--database', 'pg1'
+ 'pg_createsubscriber',
+ '--verbose',
+ '--pgdata' => $datadir,
+ '--publisher-server' => 'port=5432',
+ '--database' => 'pg1',
+ '--database' => 'pg1',
],
'duplicate database name');
command_fails(
[
- 'pg_createsubscriber', '--verbose',
- '--pgdata', $datadir,
- '--publisher-server', 'port=5432',
- '--publication', 'foo1',
- '--publication', 'foo1',
- '--database', 'pg1',
- '--database', 'pg2'
+ 'pg_createsubscriber',
+ '--verbose',
+ '--pgdata' => $datadir,
+ '--publisher-server' => 'port=5432',
+ '--publication' => 'foo1',
+ '--publication' => 'foo1',
+ '--database' => 'pg1',
+ '--database' => 'pg2',
],
'duplicate publication name');
command_fails(
[
- 'pg_createsubscriber', '--verbose',
- '--pgdata', $datadir,
- '--publisher-server', 'port=5432',
- '--publication', 'foo1',
- '--database', 'pg1',
- '--database', 'pg2'
+ 'pg_createsubscriber',
+ '--verbose',
+ '--pgdata' => $datadir,
+ '--publisher-server' => 'port=5432',
+ '--publication' => 'foo1',
+ '--database' => 'pg1',
+ '--database' => 'pg2',
],
'wrong number of publication names');
command_fails(
[
- 'pg_createsubscriber', '--verbose',
- '--pgdata', $datadir,
- '--publisher-server', 'port=5432',
- '--publication', 'foo1',
- '--publication', 'foo2',
- '--subscription', 'bar1',
- '--database', 'pg1',
- '--database', 'pg2'
+ 'pg_createsubscriber',
+ '--verbose',
+ '--pgdata' => $datadir,
+ '--publisher-server' => 'port=5432',
+ '--publication' => 'foo1',
+ '--publication' => 'foo2',
+ '--subscription' => 'bar1',
+ '--database' => 'pg1',
+ '--database' => 'pg2',
],
'wrong number of subscription names');
command_fails(
[
- 'pg_createsubscriber', '--verbose',
- '--pgdata', $datadir,
- '--publisher-server', 'port=5432',
- '--publication', 'foo1',
- '--publication', 'foo2',
- '--subscription', 'bar1',
- '--subscription', 'bar2',
- '--replication-slot', 'baz1',
- '--database', 'pg1',
- '--database', 'pg2'
+ 'pg_createsubscriber',
+ '--verbose',
+ '--pgdata' => $datadir,
+ '--publisher-server' => 'port=5432',
+ '--publication' => 'foo1',
+ '--publication' => 'foo2',
+ '--subscription' => 'bar1',
+ '--subscription' => 'bar2',
+ '--replication-slot' => 'baz1',
+ '--database' => 'pg1',
+ '--database' => 'pg2',
],
'wrong number of replication slot names');
# Run pg_createsubscriber on a promoted server
command_fails(
[
- 'pg_createsubscriber', '--verbose',
- '--dry-run', '--pgdata',
- $node_t->data_dir, '--publisher-server',
- $node_p->connstr($db1), '--socketdir',
- $node_t->host, '--subscriber-port',
- $node_t->port, '--database',
- $db1, '--database',
- $db2
+ 'pg_createsubscriber',
+ '--verbose',
+ '--dry-run',
+ '--pgdata' => $node_t->data_dir,
+ '--publisher-server' => $node_p->connstr($db1),
+ '--socketdir' => $node_t->host,
+ '--subscriber-port' => $node_t->port,
+ '--database' => $db1,
+ '--database' => $db2,
],
'target server is not in recovery');
# Run pg_createsubscriber when standby is running
command_fails(
[
- 'pg_createsubscriber', '--verbose',
- '--dry-run', '--pgdata',
- $node_s->data_dir, '--publisher-server',
- $node_p->connstr($db1), '--socketdir',
- $node_s->host, '--subscriber-port',
- $node_s->port, '--database',
- $db1, '--database',
- $db2
+ 'pg_createsubscriber',
+ '--verbose',
+ '--dry-run',
+ '--pgdata' => $node_s->data_dir,
+ '--publisher-server' => $node_p->connstr($db1),
+ '--socketdir' => $node_s->host,
+ '--subscriber-port' => $node_s->port,
+ '--database' => $db1,
+ '--database' => $db2,
],
'standby is up and running');
# Run pg_createsubscriber on about-to-fail node F
command_fails(
[
- 'pg_createsubscriber', '--verbose',
- '--pgdata', $node_f->data_dir,
- '--publisher-server', $node_p->connstr($db1),
- '--socketdir', $node_f->host,
- '--subscriber-port', $node_f->port,
- '--database', $db1,
- '--database', $db2
+ 'pg_createsubscriber',
+ '--verbose',
+ '--pgdata' => $node_f->data_dir,
+ '--publisher-server' => $node_p->connstr($db1),
+ '--socketdir' => $node_f->host,
+ '--subscriber-port' => $node_f->port,
+ '--database' => $db1,
+ '--database' => $db2
],
'subscriber data directory is not a copy of the source database cluster');
# Run pg_createsubscriber on node C (P -> S -> C)
command_fails(
[
- 'pg_createsubscriber', '--verbose',
- '--dry-run', '--pgdata',
- $node_c->data_dir, '--publisher-server',
- $node_s->connstr($db1), '--socketdir',
- $node_c->host, '--subscriber-port',
- $node_c->port, '--database',
- $db1, '--database',
- $db2
+ 'pg_createsubscriber',
+ '--verbose',
+ '--dry-run',
+ '--pgdata' => $node_c->data_dir,
+ '--publisher-server' => $node_s->connstr($db1),
+ '--socketdir' => $node_c->host,
+ '--subscriber-port' => $node_c->port,
+ '--database' => $db1,
+ '--database' => $db2,
],
'primary server is in recovery');
$node_s->stop;
command_fails(
[
- 'pg_createsubscriber', '--verbose',
- '--dry-run', '--pgdata',
- $node_s->data_dir, '--publisher-server',
- $node_p->connstr($db1), '--socketdir',
- $node_s->host, '--subscriber-port',
- $node_s->port, '--database',
- $db1, '--database',
- $db2
+ 'pg_createsubscriber',
+ '--verbose',
+ '--dry-run',
+ '--pgdata' => $node_s->data_dir,
+ '--publisher-server' => $node_p->connstr($db1),
+ '--socketdir' => $node_s->host,
+ '--subscriber-port' => $node_s->port,
+ '--database' => $db1,
+ '--database' => $db2,
],
'primary contains unmet conditions on node P');
# Restore default settings here but only apply it after testing standby. Some
});
command_fails(
[
- 'pg_createsubscriber', '--verbose',
- '--dry-run', '--pgdata',
- $node_s->data_dir, '--publisher-server',
- $node_p->connstr($db1), '--socketdir',
- $node_s->host, '--subscriber-port',
- $node_s->port, '--database',
- $db1, '--database',
- $db2
+ 'pg_createsubscriber',
+ '--verbose',
+ '--dry-run',
+ '--pgdata' => $node_s->data_dir,
+ '--publisher-server' => $node_p->connstr($db1),
+ '--socketdir' => $node_s->host,
+ '--subscriber-port' => $node_s->port,
+ '--database' => $db1,
+ '--database' => $db2,
],
'standby contains unmet conditions on node S');
$node_s->append_conf(
# dry run mode on node S
command_ok(
[
- 'pg_createsubscriber', '--verbose',
- '--recovery-timeout', "$PostgreSQL::Test::Utils::timeout_default",
- '--dry-run', '--pgdata',
- $node_s->data_dir, '--publisher-server',
- $node_p->connstr($db1), '--socketdir',
- $node_s->host, '--subscriber-port',
- $node_s->port, '--publication',
- 'pub1', '--publication',
- 'pub2', '--subscription',
- 'sub1', '--subscription',
- 'sub2', '--database',
- $db1, '--database',
- $db2
+ 'pg_createsubscriber',
+ '--verbose',
+ '--dry-run',
+ '--recovery-timeout' => $PostgreSQL::Test::Utils::timeout_default,
+ '--pgdata' => $node_s->data_dir,
+ '--publisher-server' => $node_p->connstr($db1),
+ '--socketdir' => $node_s->host,
+ '--subscriber-port' => $node_s->port,
+ '--publication' => 'pub1',
+ '--publication' => 'pub2',
+ '--subscription' => 'sub1',
+ '--subscription' => 'sub2',
+ '--database' => $db1,
+ '--database' => $db2,
],
'run pg_createsubscriber --dry-run on node S');
# pg_createsubscriber can run without --databases option
command_ok(
[
- 'pg_createsubscriber', '--verbose',
- '--dry-run', '--pgdata',
- $node_s->data_dir, '--publisher-server',
- $node_p->connstr($db1), '--socketdir',
- $node_s->host, '--subscriber-port',
- $node_s->port, '--replication-slot',
- 'replslot1'
+ 'pg_createsubscriber',
+ '--verbose',
+ '--dry-run',
+ '--pgdata' => $node_s->data_dir,
+ '--publisher-server' => $node_p->connstr($db1),
+ '--socketdir' => $node_s->host,
+ '--subscriber-port' => $node_s->port,
+ '--replication-slot' => 'replslot1',
],
'run pg_createsubscriber without --databases');
-# Run pg_createsubscriber on node S
+# Run pg_createsubscriber on node S. --verbose is used twice
+# to show more information.
command_ok(
[
- 'pg_createsubscriber', '--verbose',
- '--recovery-timeout', "$PostgreSQL::Test::Utils::timeout_default",
- '--verbose', '--pgdata',
- $node_s->data_dir, '--publisher-server',
- $node_p->connstr($db1), '--socketdir',
- $node_s->host, '--subscriber-port',
- $node_s->port, '--publication',
- 'pub1', '--publication',
- 'Pub2', '--replication-slot',
- 'replslot1', '--replication-slot',
- 'replslot2', '--database',
- $db1, '--database',
- $db2
+ 'pg_createsubscriber',
+ '--verbose', '--verbose',
+ '--recovery-timeout' => $PostgreSQL::Test::Utils::timeout_default,
+ '--pgdata' => $node_s->data_dir,
+ '--publisher-server' => $node_p->connstr($db1),
+ '--socketdir' => $node_s->host,
+ '--subscriber-port' => $node_s->port,
+ '--publication' => 'pub1',
+ '--publication' => 'Pub2',
+ '--replication-slot' => 'replslot1',
+ '--replication-slot' => 'replslot2',
+ '--database' => $db1,
+ '--database' => $db2,
],
'run pg_createsubscriber on node S');
# corrupted yet.
command_ok(
[
- 'pg_checksums', '--check',
- '-D', $pgdata,
- '--filenode', $relfilenode_corrupted
+ 'pg_checksums',
+ '--check',
+ '--pgdata' => $pgdata,
+ '--filenode' => $relfilenode_corrupted,
],
"succeeds for single relfilenode on tablespace $tablespace with offline cluster"
);
# Checksum checks on single relfilenode fail
$node->command_checks_all(
[
- 'pg_checksums', '--check',
- '-D', $pgdata,
- '--filenode', $relfilenode_corrupted
+ 'pg_checksums',
+ '--check',
+ '--pgdata' => $pgdata,
+ '--filenode' => $relfilenode_corrupted,
],
1,
[qr/Bad checksums:.*1/],
# Global checksum checks fail as well
$node->command_checks_all(
- [ 'pg_checksums', '--check', '-D', $pgdata ],
+ [ 'pg_checksums', '--check', '--pgdata' => $pgdata ],
1,
[qr/Bad checksums:.*1/],
[qr/checksum verification failed/],
$node->start;
$node->safe_psql('postgres', "DROP TABLE $table;");
$node->stop;
- $node->command_ok([ 'pg_checksums', '--check', '-D', $pgdata ],
+ $node->command_ok(
+ [ 'pg_checksums', '--check', '--pgdata' => $pgdata ],
"succeeds again after table drop on tablespace $tablespace");
$node->start;
unless ($Config{osname} eq 'darwin');
# Enable checksums.
-command_ok([ 'pg_checksums', '--enable', '--no-sync', '-D', $pgdata ],
+command_ok([ 'pg_checksums', '--enable', '--no-sync', '--pgdata' => $pgdata ],
"checksums successfully enabled in cluster");
# Successive attempt to enable checksums fails.
-command_fails([ 'pg_checksums', '--enable', '--no-sync', '-D', $pgdata ],
+command_fails(
+ [ 'pg_checksums', '--enable', '--no-sync', '--pgdata' => $pgdata ],
"enabling checksums fails if already enabled");
# Control file should know that checksums are enabled.
# Disable checksums again. Flush result here as that should be cheap.
command_ok(
- [ 'pg_checksums', '--disable', '-D', $pgdata ],
+ [ 'pg_checksums', '--disable', '--pgdata' => $pgdata ],
"checksums successfully disabled in cluster");
# Successive attempt to disable checksums fails.
command_fails(
- [ 'pg_checksums', '--disable', '--no-sync', '-D', $pgdata ],
+ [ 'pg_checksums', '--disable', '--no-sync', '--pgdata' => $pgdata ],
"disabling checksums fails if already disabled");
# Control file should know that checksums are disabled.
'checksums disabled in control file');
# Enable checksums again for follow-up tests.
-command_ok([ 'pg_checksums', '--enable', '--no-sync', '-D', $pgdata ],
+command_ok([ 'pg_checksums', '--enable', '--no-sync', '--pgdata' => $pgdata ],
"checksums successfully enabled in cluster");
# Control file should know that checksums are enabled.
'checksums enabled in control file');
# Checksums pass on a newly-created cluster
-command_ok([ 'pg_checksums', '--check', '-D', $pgdata ],
+command_ok([ 'pg_checksums', '--check', '--pgdata' => $pgdata ],
"succeeds with offline cluster");
# Checksums are verified if no other arguments are specified
command_ok(
- [ 'pg_checksums', '-D', $pgdata ],
+ [ 'pg_checksums', '--pgdata' => $pgdata ],
"verifies checksums as default action");
# Specific relation files cannot be requested when action is --disable
# or --enable.
command_fails(
- [ 'pg_checksums', '--disable', '--filenode', '1234', '-D', $pgdata ],
+ [
+ 'pg_checksums',
+ '--disable',
+ '--filenode' => '1234',
+ '--pgdata' => $pgdata
+ ],
"fails when relfilenodes are requested and action is --disable");
command_fails(
- [ 'pg_checksums', '--enable', '--filenode', '1234', '-D', $pgdata ],
+ [
+ 'pg_checksums',
+ '--enable',
+ '--filenode' => '1234',
+ '--pgdata' => $pgdata
+ ],
"fails when relfilenodes are requested and action is --enable");
# Test postgres -C for an offline cluster.
# account on Windows.
command_checks_all(
[
- 'pg_ctl', 'start', '-D', $pgdata, '-s', '-o',
- '-C data_checksums -c log_min_messages=fatal'
+ 'pg_ctl', 'start',
+ '--silent',
+ '--pgdata' => $pgdata,
+ '-o' => '-C data_checksums -c log_min_messages=fatal',
],
1,
[qr/^on$/],
# Checks cannot happen with an online cluster
$node->start;
-command_fails([ 'pg_checksums', '--check', '-D', $pgdata ],
+command_fails([ 'pg_checksums', '--check', '--pgdata' => $pgdata ],
"fails with online cluster");
# Check corruption of table on default tablespace.
append_to_file $file_name, "foo";
$node->command_checks_all(
- [ 'pg_checksums', '--check', '-D', $pgdata ],
+ [ 'pg_checksums', '--check', '--pgdata' => $pgdata ],
1,
[qr/^$/],
[qr/could not read block 0 in file.*$file\":/],
# when verifying checksums.
mkdir "$tablespace_dir/PG_99_999999991/";
append_to_file "$tablespace_dir/PG_99_999999991/foo", "123";
-command_ok([ 'pg_checksums', '--check', '-D', $pgdata ],
+command_ok([ 'pg_checksums', '--check', '--pgdata' => $pgdata ],
"succeeds with foreign tablespace");
# Authorized relation files filled with corrupted data cause the
mkdir($tsbackup1path) || die "mkdir $tsbackup1path: $!";
$primary->command_ok(
[
- 'pg_basebackup', '-D',
- $backup1path, '--no-sync',
- '-cfast', "-T${tsprimary}=${tsbackup1path}"
+ 'pg_basebackup',
+ '--no-sync',
+ '--pgdata' => $backup1path,
+ '--checkpoint' => 'fast',
+ '--tablespace-mapping' => "${tsprimary}=${tsbackup1path}"
],
"full backup");
mkdir($tsbackup2path) || die "mkdir $tsbackup2path: $!";
$primary->command_ok(
[
- 'pg_basebackup', '-D',
- $backup2path, '--no-sync',
- '-cfast', "-T${tsprimary}=${tsbackup2path}",
- '--incremental', $backup1path . '/backup_manifest'
+ 'pg_basebackup',
+ '--no-sync',
+ '--pgdata' => $backup2path,
+ '--checkpoint' => 'fast',
+ '--tablespace-mapping' => "${tsprimary}=${tsbackup2path}",
+ '--incremental' => $backup1path . '/backup_manifest'
],
"incremental backup");
my $dump2 = $backupdir . '/pitr2.dump';
$pitr1->command_ok(
[
- 'pg_dumpall', '-f',
- $dump1, '--no-sync',
- '--no-unlogged-table-data', '-d',
- $pitr1->connstr('postgres'),
+ 'pg_dumpall',
+ '--no-sync',
+ '--no-unlogged-table-data',
+ '--file' => $dump1,
+ '--dbname' => $pitr1->connstr('postgres'),
],
'dump from PITR 1');
$pitr2->command_ok(
[
- 'pg_dumpall', '-f',
- $dump2, '--no-sync',
- '--no-unlogged-table-data', '-d',
- $pitr2->connstr('postgres'),
+ 'pg_dumpall',
+ '--no-sync',
+ '--no-unlogged-table-data',
+ '--file' => $dump2,
+ '--dbname' => $pitr2->connstr('postgres'),
],
'dump from PITR 2');
# Take a full backup.
my $backup1path = $node1->backup_dir . '/backup1';
$node1->command_ok(
- [ 'pg_basebackup', '-D', $backup1path, '--no-sync', '-cfast' ],
+ [
+ 'pg_basebackup',
+ '--pgdata' => $backup1path,
+ '--no-sync',
+ '--checkpoint' => 'fast'
+ ],
"full backup from node1");
# Insert a second row on the original node.
my $backup2path = $node1->backup_dir . '/backup2';
$node1->command_ok(
[
- 'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast',
- '--incremental', $backup1path . '/backup_manifest'
+ 'pg_basebackup',
+ '--pgdata' => $backup2path,
+ '--no-sync',
+ '--checkpoint' => 'fast',
+ '--incremental' => $backup1path . '/backup_manifest'
],
"incremental backup from node1");
my $backup3path = $node1->backup_dir . '/backup3';
$node2->command_ok(
[
- 'pg_basebackup', '-D', $backup3path, '--no-sync', '-cfast',
- '--incremental', $backup2path . '/backup_manifest'
+ 'pg_basebackup',
+ '--pgdata' => $backup3path,
+ '--no-sync',
+ '--checkpoint' => 'fast',
+ '--incremental' => $backup2path . '/backup_manifest'
],
"incremental backup from node2");
# Take a full backup.
my $original_backup_path = $node->backup_dir . '/original';
$node->command_ok(
- [ 'pg_basebackup', '-D', $original_backup_path, '--no-sync', '-cfast' ],
+ [
+ 'pg_basebackup',
+ '--pgdata' => $original_backup_path,
+ '--no-sync',
+ '--checkpoint' => 'fast',
+ ],
"full backup");
# Verify the full backup.
my $revised_backup_path = $node->backup_dir . '/' . $backup_name;
$node->command_ok(
[
- 'pg_combinebackup', $original_backup_path,
- '-o', $revised_backup_path,
- '--no-sync', @extra_options
+ 'pg_combinebackup',
+ $original_backup_path,
+ '--output' => $revised_backup_path,
+ '--no-sync',
+ @extra_options,
],
"pg_combinebackup with @extra_options");
if (defined $failure_pattern)
# Take a full backup from node1.
my $backup1path = $node1->backup_dir . '/backup1';
$node1->command_ok(
- [ 'pg_basebackup', '-D', $backup1path, '--no-sync', '-cfast' ],
+ [
+ 'pg_basebackup',
+ '--pgdata' => $backup1path,
+ '--no-sync',
+ '--checkpoint' => 'fast',
+ ],
"full backup from node1");
# Now take an incremental backup.
my $backup2path = $node1->backup_dir . '/backup2';
$node1->command_ok(
[
- 'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast',
- '--incremental', $backup1path . '/backup_manifest'
+ 'pg_basebackup',
+ '--pgdata' => $backup2path,
+ '--no-sync',
+ '--checkpoint' => 'fast',
+ '--incremental' => $backup1path . '/backup_manifest',
],
"incremental backup from node1");
my $backup3path = $node1->backup_dir . '/backup3';
$node1->command_ok(
[
- 'pg_basebackup', '-D', $backup3path, '--no-sync', '-cfast',
- '--incremental', $backup2path . '/backup_manifest'
+ 'pg_basebackup',
+ '--pgdata' => $backup3path,
+ '--no-sync',
+ '--checkpoint' => 'fast',
+ '--incremental' => $backup2path . '/backup_manifest',
],
"another incremental backup from node1");
# Take a full backup from node2.
my $backupother1path = $node1->backup_dir . '/backupother1';
$node2->command_ok(
- [ 'pg_basebackup', '-D', $backupother1path, '--no-sync', '-cfast' ],
+ [
+ 'pg_basebackup',
+ '--pgdata' => $backupother1path,
+ '--no-sync',
+ '--checkpoint' => 'fast',
+ ],
"full backup from node2");
# Take an incremental backup from node2.
my $backupother2path = $node1->backup_dir . '/backupother2';
$node2->command_ok(
[
- 'pg_basebackup', '-D', $backupother2path, '--no-sync', '-cfast',
- '--incremental', $backupother1path . '/backup_manifest'
+ 'pg_basebackup',
+ '--pgdata' => $backupother2path,
+ '--no-sync',
+ '--checkpoint' => 'fast',
+ '--incremental' => $backupother1path . '/backup_manifest',
],
"incremental backup from node2");
# Can't combine 2 full backups.
$node1->command_fails_like(
[
- 'pg_combinebackup', $backup1path, $backup1path, '-o',
- $resultpath, $mode
+ 'pg_combinebackup', $backup1path, $backup1path,
+ '--output' => $resultpath,
+ $mode,
],
qr/is a full backup, but only the first backup should be a full backup/,
"can't combine full backups");
# Can't combine 2 incremental backups.
$node1->command_fails_like(
[
- 'pg_combinebackup', $backup2path, $backup2path, '-o',
- $resultpath, $mode
+ 'pg_combinebackup', $backup2path, $backup2path,
+ '--output' => $resultpath,
+ $mode,
],
qr/is an incremental backup, but the first backup should be a full backup/,
"can't combine full backups");
# Can't combine full backup with an incremental backup from a different system.
$node1->command_fails_like(
[
- 'pg_combinebackup', $backup1path, $backupother2path, '-o',
- $resultpath, $mode
+ 'pg_combinebackup', $backup1path, $backupother2path,
+ '--output' => $resultpath,
+ $mode,
],
qr/expected system identifier.*but found/,
"can't combine backups from different nodes");
$node1->command_fails_like(
[
'pg_combinebackup', $backup1path, $backup2path, $backup3path,
- '-o', $resultpath, $mode
+ '--output' => $resultpath,
+ $mode,
],
qr/ manifest system identifier is .*, but control file has /,
"can't combine backups with different manifest system identifier ");
# Can't omit a required backup.
$node1->command_fails_like(
[
- 'pg_combinebackup', $backup1path, $backup3path, '-o',
- $resultpath, $mode
+ 'pg_combinebackup', $backup1path, $backup3path,
+ '--output' => $resultpath,
+ $mode,
],
qr/starts at LSN.*but expected/,
"can't omit a required backup");
$node1->command_fails_like(
[
'pg_combinebackup', $backup1path, $backup3path, $backup2path,
- '-o', $resultpath, $mode
+ '--output' => $resultpath,
+ $mode,
],
qr/starts at LSN.*but expected/,
"can't combine backups in the wrong order");
$node1->command_ok(
[
'pg_combinebackup', $backup1path, $backup2path, $backup3path,
- '-o', $resultpath, $mode
+ '--output' => $resultpath,
+ $mode,
],
"can combine 3 matching backups");
rmtree($resultpath);
my $synthetic12path = $node1->backup_dir . '/synthetic12';
$node1->command_ok(
[
- 'pg_combinebackup', $backup1path, $backup2path, '-o',
- $synthetic12path, $mode
+ 'pg_combinebackup', $backup1path, $backup2path,
+ '--output' => $synthetic12path,
+ $mode,
],
"can combine 2 matching backups");
# Can combine result of previous step with second incremental.
$node1->command_ok(
[
- 'pg_combinebackup', $synthetic12path,
- $backup3path, '-o',
- $resultpath, $mode
+ 'pg_combinebackup', $synthetic12path, $backup3path,
+ '--output' => $resultpath,
+ $mode,
],
"can combine synthetic backup with later incremental");
rmtree($resultpath);
# Can't combine result of 1+2 with 2.
$node1->command_fails_like(
[
- 'pg_combinebackup', $synthetic12path,
- $backup2path, '-o',
- $resultpath, $mode
+ 'pg_combinebackup', $synthetic12path, $backup2path,
+ '--output' => $resultpath,
+ $mode,
],
qr/starts at LSN.*but expected/,
"can't combine synthetic backup with included incremental");
# Take a full backup.
my $backup1path = $primary->backup_dir . '/backup1';
$primary->command_ok(
- [ 'pg_basebackup', '-D', $backup1path, '--no-sync', '-cfast' ],
+ [
+ 'pg_basebackup',
+ '--pgdata' => $backup1path,
+ '--no-sync',
+ '--checkpoint' => 'fast'
+ ],
"full backup");
# Now make some database changes.
my $backup2path = $primary->backup_dir . '/backup2';
$primary->command_ok(
[
- 'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast',
- '--incremental', $backup1path . '/backup_manifest'
+ 'pg_basebackup',
+ '--pgdata' => $backup2path,
+ '--no-sync',
+ '--checkpoint' => 'fast',
+ '--incremental' => $backup1path . '/backup_manifest'
],
"incremental backup");
# Take a full backup.
my $backup1path = $node1->backup_dir . '/backup1';
$node1->command_ok(
- [ 'pg_basebackup', '-D', $backup1path, '--no-sync', '-cfast' ],
+ [
+ 'pg_basebackup',
+ '--pgdata' => $backup1path,
+ '--no-sync',
+ '--checkpoint' => 'fast'
+ ],
"full backup");
# Switch to wal_level=minimal, which also requires max_wal_senders=0 and
my $backup2path = $node1->backup_dir . '/backup2';
$node1->command_fails_like(
[
- 'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast',
- '--incremental', $backup1path . '/backup_manifest'
+ 'pg_basebackup',
+ '--pgdata' => $backup2path,
+ '--no-sync',
+ '--checkpoint' => 'fast',
+ '--incremental' => $backup1path . '/backup_manifest'
],
qr/WAL summaries are required on timeline 1 from.*are incomplete/,
"incremental backup fails");
# Take a full backup.
my $backup1path = $node1->backup_dir . '/backup1';
$node1->command_ok(
- [ 'pg_basebackup', '-D', $backup1path, '--no-sync', '-cfast' ],
+ [
+ 'pg_basebackup',
+ '--pgdata' => $backup1path,
+ '--no-sync',
+ '--checkpoint' => 'fast',
+ ],
"full backup from node1");
# Checkpoint and record LSN after.
my $backup2path = $node1->backup_dir . '/backup2';
$node2->command_ok(
[
- 'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast',
- '--incremental', $backup1path . '/backup_manifest'
+ 'pg_basebackup',
+ '--pgdata' => $backup2path,
+ '--no-sync',
+ '--checkpoint' => 'fast',
+ '--incremental' => $backup1path . '/backup_manifest',
],
"incremental backup from node2");
# Take a full backup.
my $backup1path = $primary->backup_dir . '/backup1';
$primary->command_ok(
- [ 'pg_basebackup', '-D', $backup1path, '--no-sync', '-cfast' ],
+ [
+ 'pg_basebackup',
+ '--pgdata' => $backup1path,
+ '--no-sync',
+ '--checkpoint' => 'fast'
+ ],
"full backup");
# Take an incremental backup.
my $backup2path = $primary->backup_dir . '/backup2';
$primary->command_ok(
[
- 'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast',
- '--incremental', $backup1path . '/backup_manifest'
+ 'pg_basebackup',
+ '--pgdata' => $backup2path,
+ '--no-sync',
+ '--checkpoint' => 'fast',
+ '--incremental' => $backup1path . '/backup_manifest'
],
"incremental backup");
# pg_combinebackup should fail.
my $outpath = $primary->backup_dir . '/out';
$primary->command_fails_like(
- [ 'pg_combinebackup', $backup1path, $backup2path, '-o', $outpath, ],
+ [
+ 'pg_combinebackup', $backup1path,
+ $backup2path, '--output' => $outpath,
+ ],
qr/full backup contains unexpected incremental file/,
"pg_combinebackup fails");
program_version_ok('pg_ctl');
program_options_handling_ok('pg_ctl');
-command_exit_is([ 'pg_ctl', 'start', '-D', "$tempdir/nonexistent" ],
+command_exit_is([ 'pg_ctl', 'start', '--pgdata' => "$tempdir/nonexistent" ],
1, 'pg_ctl start with nonexistent directory');
-command_ok([ 'pg_ctl', 'initdb', '-D', "$tempdir/data", '-o', '-N' ],
+command_ok(
+ [
+ 'pg_ctl', 'initdb',
+ '--pgdata' => "$tempdir/data",
+ '--options' => '--no-sync'
+ ],
'pg_ctl initdb');
command_ok([ $ENV{PG_REGRESS}, '--config-auth', "$tempdir/data" ],
'configure authentication');
}
close $conf;
my $ctlcmd = [
- 'pg_ctl', 'start', '-D', "$tempdir/data", '-l',
- "$PostgreSQL::Test::Utils::log_path/001_start_stop_server.log"
+ 'pg_ctl', 'start',
+ '--pgdata' => "$tempdir/data",
+ '--log' => "$PostgreSQL::Test::Utils::log_path/001_start_stop_server.log"
];
command_like($ctlcmd, qr/done.*server started/s, 'pg_ctl start');
# postmaster they start. Waiting more than the 2 seconds slop time allowed
# by wait_for_postmaster() prevents that mistake.
sleep 3 if ($windows_os);
-command_fails([ 'pg_ctl', 'start', '-D', "$tempdir/data" ],
+command_fails([ 'pg_ctl', 'start', '--pgdata' => "$tempdir/data" ],
'second pg_ctl start fails');
-command_ok([ 'pg_ctl', 'stop', '-D', "$tempdir/data" ], 'pg_ctl stop');
-command_fails([ 'pg_ctl', 'stop', '-D', "$tempdir/data" ],
+command_ok([ 'pg_ctl', 'stop', '--pgdata' => "$tempdir/data" ],
+ 'pg_ctl stop');
+command_fails([ 'pg_ctl', 'stop', '--pgdata' => "$tempdir/data" ],
'second pg_ctl stop fails');
# Log file for default permission test. The permissions won't be checked on
# Windows but we still want to do the restart test.
my $logFileName = "$tempdir/data/perm-test-600.log";
-command_ok([ 'pg_ctl', 'restart', '-D', "$tempdir/data", '-l', $logFileName ],
+command_ok(
+ [
+ 'pg_ctl', 'restart',
+ '--pgdata' => "$tempdir/data",
+ '--log' => $logFileName
+ ],
'pg_ctl restart with server not running');
# Permissions on log file should be default
skip "group access not supported on Windows", 3
if ($windows_os || $Config::Config{osname} eq 'cygwin');
- system_or_bail 'pg_ctl', 'stop', '-D', "$tempdir/data";
+ system_or_bail 'pg_ctl', 'stop', '--pgdata' => "$tempdir/data";
# Change the data dir mode so log file will be created with group read
# privileges on the next start
chmod_recursive("$tempdir/data", 0750, 0640);
command_ok(
- [ 'pg_ctl', 'start', '-D', "$tempdir/data", '-l', $logFileName ],
+ [
+ 'pg_ctl', 'start',
+ '--pgdata' => "$tempdir/data",
+ '--log' => $logFileName
+ ],
'start server to check group permissions');
ok(-f $logFileName);
ok(check_mode_recursive("$tempdir/data", 0750, 0640));
}
-command_ok([ 'pg_ctl', 'restart', '-D', "$tempdir/data" ],
+command_ok([ 'pg_ctl', 'restart', '--pgdata' => "$tempdir/data" ],
'pg_ctl restart with server running');
-system_or_bail 'pg_ctl', 'stop', '-D', "$tempdir/data";
+system_or_bail 'pg_ctl', 'stop', '--pgdata' => "$tempdir/data";
done_testing();
my $tempdir = PostgreSQL::Test::Utils::tempdir;
-command_exit_is([ 'pg_ctl', 'status', '-D', "$tempdir/nonexistent" ],
+command_exit_is([ 'pg_ctl', 'status', '--pgdata' => "$tempdir/nonexistent" ],
4, 'pg_ctl status with nonexistent directory');
my $node = PostgreSQL::Test::Cluster->new('main');
$node->init;
-command_exit_is([ 'pg_ctl', 'status', '-D', $node->data_dir ],
+command_exit_is([ 'pg_ctl', 'status', '--pgdata' => $node->data_dir ],
3, 'pg_ctl status with server not running');
-system_or_bail 'pg_ctl', '-l', "$tempdir/logfile", '-D',
- $node->data_dir, '-w', 'start';
-command_exit_is([ 'pg_ctl', 'status', '-D', $node->data_dir ],
+system_or_bail(
+ 'pg_ctl',
+ '--log' => "$tempdir/logfile",
+ '--pgdata' => $node->data_dir,
+ '--wait', 'start');
+command_exit_is([ 'pg_ctl', 'status', '--pgdata' => $node->data_dir ],
0, 'pg_ctl status with server running');
-system_or_bail 'pg_ctl', 'stop', '-D', $node->data_dir;
+system_or_bail 'pg_ctl', 'stop', '--pgdata' => $node->data_dir;
done_testing();
my $tempdir = PostgreSQL::Test::Utils::tempdir;
command_fails_like(
- [ 'pg_ctl', '-D', "$tempdir/nonexistent", 'promote' ],
+ [ 'pg_ctl', '--pgdata' => "$tempdir/nonexistent", 'promote' ],
qr/directory .* does not exist/,
'pg_ctl promote with nonexistent directory');
$node_primary->init(allows_streaming => 1);
command_fails_like(
- [ 'pg_ctl', '-D', $node_primary->data_dir, 'promote' ],
+ [ 'pg_ctl', '--pgdata' => $node_primary->data_dir, 'promote' ],
qr/PID file .* does not exist/,
'pg_ctl promote of not running instance fails');
$node_primary->start;
command_fails_like(
- [ 'pg_ctl', '-D', $node_primary->data_dir, 'promote' ],
+ [ 'pg_ctl', '--pgdata' => $node_primary->data_dir, 'promote' ],
qr/not in standby mode/,
'pg_ctl promote of primary instance fails');
is($node_standby->safe_psql('postgres', 'SELECT pg_is_in_recovery()'),
't', 'standby is in recovery');
-command_ok([ 'pg_ctl', '-D', $node_standby->data_dir, '-W', 'promote' ],
- 'pg_ctl -W promote of standby runs');
+command_ok(
+ [
+ 'pg_ctl',
+ '--pgdata' => $node_standby->data_dir,
+ '--no-wait', 'promote'
+ ],
+ 'pg_ctl --no-wait promote of standby runs');
ok( $node_standby->poll_query_until(
'postgres', 'SELECT NOT pg_is_in_recovery()'),
is($node_standby->safe_psql('postgres', 'SELECT pg_is_in_recovery()'),
't', 'standby is in recovery');
-command_ok([ 'pg_ctl', '-D', $node_standby->data_dir, 'promote' ],
+command_ok([ 'pg_ctl', '--pgdata' => $node_standby->data_dir, 'promote' ],
'pg_ctl promote of standby runs');
# no wait here
my %pgdump_runs = (
binary_upgrade => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- '--format=custom',
- "--file=$tempdir/binary_upgrade.dump",
- '-w',
+ 'pg_dump', '--no-sync',
+ '--format' => 'custom',
+ '--file' => "$tempdir/binary_upgrade.dump",
+ '--no-password',
'--schema-only',
'--binary-upgrade',
- '-d', 'postgres', # alternative way to specify database
+ '--dbname' => 'postgres', # alternative way to specify database
],
restore_cmd => [
- 'pg_restore', '-Fc', '--verbose',
- "--file=$tempdir/binary_upgrade.sql",
+ 'pg_restore',
+ '--format' => 'custom',
+ '--verbose',
+ '--file' => "$tempdir/binary_upgrade.sql",
"$tempdir/binary_upgrade.dump",
],
},
test_key => 'compression',
compile_option => 'gzip',
dump_cmd => [
- 'pg_dump', '--format=custom',
- '--compress=1', "--file=$tempdir/compression_gzip_custom.dump",
+ 'pg_dump',
+ '--format' => 'custom',
+ '--compress' => '1',
+ '--file' => "$tempdir/compression_gzip_custom.dump",
'postgres',
],
restore_cmd => [
'pg_restore',
- "--file=$tempdir/compression_gzip_custom.sql",
+ '--file' => "$tempdir/compression_gzip_custom.sql",
"$tempdir/compression_gzip_custom.dump",
],
command_like => {
command => [
- 'pg_restore', '-l', "$tempdir/compression_gzip_custom.dump",
+ 'pg_restore', '--list',
+ "$tempdir/compression_gzip_custom.dump",
],
expected => qr/Compression: gzip/,
name => 'data content is gzip-compressed'
test_key => 'compression',
compile_option => 'gzip',
dump_cmd => [
- 'pg_dump', '--jobs=2',
- '--format=directory', '--compress=gzip:1',
- "--file=$tempdir/compression_gzip_dir", 'postgres',
+ 'pg_dump',
+ '--jobs' => '2',
+ '--format' => 'directory',
+ '--compress' => 'gzip:1',
+ '--file' => "$tempdir/compression_gzip_dir",
+ 'postgres',
],
# Give coverage for manually compressed blobs.toc files during
# restore.
"$tempdir/compression_gzip_dir/*.dat.gz",
],
restore_cmd => [
- 'pg_restore', '--jobs=2',
- "--file=$tempdir/compression_gzip_dir.sql",
+ 'pg_restore',
+ '--jobs' => '2',
+ '--file' => "$tempdir/compression_gzip_dir.sql",
"$tempdir/compression_gzip_dir",
],
},
test_key => 'compression',
compile_option => 'gzip',
dump_cmd => [
- 'pg_dump', '--format=plain', '-Z1',
- "--file=$tempdir/compression_gzip_plain.sql.gz", 'postgres',
+ 'pg_dump',
+ '--format' => 'plain',
+ '--compress' => '1',
+ '--file' => "$tempdir/compression_gzip_plain.sql.gz",
+ 'postgres',
],
# Decompress the generated file to run through the tests.
compress_cmd => {
test_key => 'compression',
compile_option => 'lz4',
dump_cmd => [
- 'pg_dump', '--format=custom',
- '--compress=lz4', "--file=$tempdir/compression_lz4_custom.dump",
+ 'pg_dump',
+ '--format' => 'custom',
+ '--compress' => 'lz4',
+ '--file' => "$tempdir/compression_lz4_custom.dump",
'postgres',
],
restore_cmd => [
'pg_restore',
- "--file=$tempdir/compression_lz4_custom.sql",
+ '--file' => "$tempdir/compression_lz4_custom.sql",
"$tempdir/compression_lz4_custom.dump",
],
command_like => {
- command =>
- [ 'pg_restore', '-l', "$tempdir/compression_lz4_custom.dump", ],
+ command => [
+ 'pg_restore', '--list',
+ "$tempdir/compression_lz4_custom.dump",
+ ],
expected => qr/Compression: lz4/,
name => 'data content is lz4 compressed'
},
test_key => 'compression',
compile_option => 'lz4',
dump_cmd => [
- 'pg_dump', '--jobs=2',
- '--format=directory', '--compress=lz4:1',
- "--file=$tempdir/compression_lz4_dir", 'postgres',
+ 'pg_dump',
+ '--jobs' => '2',
+ '--format' => 'directory',
+ '--compress' => 'lz4:1',
+ '--file' => "$tempdir/compression_lz4_dir",
+ 'postgres',
],
# Verify that data files were compressed
glob_patterns => [
"$tempdir/compression_lz4_dir/*.dat.lz4",
],
restore_cmd => [
- 'pg_restore', '--jobs=2',
- "--file=$tempdir/compression_lz4_dir.sql",
+ 'pg_restore',
+ '--jobs' => '2',
+ '--file' => "$tempdir/compression_lz4_dir.sql",
"$tempdir/compression_lz4_dir",
],
},
test_key => 'compression',
compile_option => 'lz4',
dump_cmd => [
- 'pg_dump', '--format=plain', '--compress=lz4',
- "--file=$tempdir/compression_lz4_plain.sql.lz4", 'postgres',
+ 'pg_dump',
+ '--format' => 'plain',
+ '--compress' => 'lz4',
+ '--file' => "$tempdir/compression_lz4_plain.sql.lz4",
+ 'postgres',
],
# Decompress the generated file to run through the tests.
compress_cmd => {
test_key => 'compression',
compile_option => 'zstd',
dump_cmd => [
- 'pg_dump', '--format=custom',
- '--compress=zstd', "--file=$tempdir/compression_zstd_custom.dump",
+ 'pg_dump',
+ '--format' => 'custom',
+ '--compress' => 'zstd',
+ '--file' => "$tempdir/compression_zstd_custom.dump",
'postgres',
],
restore_cmd => [
'pg_restore',
- "--file=$tempdir/compression_zstd_custom.sql",
+ '--file' => "$tempdir/compression_zstd_custom.sql",
"$tempdir/compression_zstd_custom.dump",
],
command_like => {
command => [
- 'pg_restore', '-l', "$tempdir/compression_zstd_custom.dump",
+ 'pg_restore', '--list',
+ "$tempdir/compression_zstd_custom.dump",
],
expected => qr/Compression: zstd/,
name => 'data content is zstd compressed'
test_key => 'compression',
compile_option => 'zstd',
dump_cmd => [
- 'pg_dump', '--jobs=2',
- '--format=directory', '--compress=zstd:1',
- "--file=$tempdir/compression_zstd_dir", 'postgres',
+ 'pg_dump',
+ '--jobs' => '2',
+ '--format' => 'directory',
+ '--compress' => 'zstd:1',
+ '--file' => "$tempdir/compression_zstd_dir",
+ 'postgres',
],
# Give coverage for manually compressed blobs.toc files during
# restore.
"$tempdir/compression_zstd_dir/*.dat.zst",
],
restore_cmd => [
- 'pg_restore', '--jobs=2',
- "--file=$tempdir/compression_zstd_dir.sql",
+ 'pg_restore',
+ '--jobs' => '2',
+ '--file' => "$tempdir/compression_zstd_dir.sql",
"$tempdir/compression_zstd_dir",
],
},
test_key => 'compression',
compile_option => 'zstd',
dump_cmd => [
- 'pg_dump', '--format=plain', '--compress=zstd:long',
- "--file=$tempdir/compression_zstd_plain.sql.zst", 'postgres',
+ 'pg_dump',
+ '--format' => 'plain',
+ '--compress' => 'zstd:long',
+ '--file' => "$tempdir/compression_zstd_plain.sql.zst",
+ 'postgres',
],
# Decompress the generated file to run through the tests.
compress_cmd => {
clean => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/clean.sql",
- '-c',
- '-d', 'postgres', # alternative way to specify database
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/clean.sql",
+ '--clean',
+ '--dbname' => 'postgres', # alternative way to specify database
],
},
clean_if_exists => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/clean_if_exists.sql",
- '-c',
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/clean_if_exists.sql",
+ '--clean',
'--if-exists',
- '--encoding=UTF8', # no-op, just tests that option is accepted
+ '--encoding' => 'UTF8', # no-op, just for testing
'postgres',
],
},
column_inserts => {
dump_cmd => [
'pg_dump', '--no-sync',
- "--file=$tempdir/column_inserts.sql", '-a',
+ '--file' => "$tempdir/column_inserts.sql",
+ '--data-only',
'--column-inserts', 'postgres',
],
},
createdb => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/createdb.sql",
- '-C',
- '-R', # no-op, just for testing
- '-v',
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/createdb.sql",
+ '--create',
+ '--no-reconnect', # no-op, just for testing
+ '--verbose',
'postgres',
],
},
data_only => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/data_only.sql",
- '-a',
- '--superuser=test_superuser',
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/data_only.sql",
+ '--data-only',
+ '--superuser' => 'test_superuser',
'--disable-triggers',
- '-v', # no-op, just make sure it works
+ '--verbose', # no-op, just make sure it works
'postgres',
],
},
defaults => {
dump_cmd => [
'pg_dump', '--no-sync',
- '-f', "$tempdir/defaults.sql",
+ '--file' => "$tempdir/defaults.sql",
'postgres',
],
},
defaults_no_public => {
database => 'regress_pg_dump_test',
dump_cmd => [
- 'pg_dump', '--no-sync', '-f', "$tempdir/defaults_no_public.sql",
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/defaults_no_public.sql",
'regress_pg_dump_test',
],
},
defaults_no_public_clean => {
database => 'regress_pg_dump_test',
dump_cmd => [
- 'pg_dump', '--no-sync', '-c', '-f',
- "$tempdir/defaults_no_public_clean.sql",
+ 'pg_dump', '--no-sync',
+ '--clean',
+ '--file' => "$tempdir/defaults_no_public_clean.sql",
'regress_pg_dump_test',
],
},
defaults_public_owner => {
database => 'regress_public_owner',
dump_cmd => [
- 'pg_dump', '--no-sync', '-f',
- "$tempdir/defaults_public_owner.sql",
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/defaults_public_owner.sql",
'regress_public_owner',
],
},
defaults_custom_format => {
test_key => 'defaults',
dump_cmd => [
- 'pg_dump', '-Fc',
- "--file=$tempdir/defaults_custom_format.dump", 'postgres',
+ 'pg_dump',
+ '--format' => 'custom',
+ '--file' => "$tempdir/defaults_custom_format.dump",
+ 'postgres',
],
restore_cmd => [
- 'pg_restore', '-Fc',
- "--file=$tempdir/defaults_custom_format.sql",
+ 'pg_restore',
+ '--format' => 'custom',
+ '--file' => "$tempdir/defaults_custom_format.sql",
"$tempdir/defaults_custom_format.dump",
],
command_like => {
- command =>
- [ 'pg_restore', '-l', "$tempdir/defaults_custom_format.dump", ],
+ command => [
+ 'pg_restore', '--list',
+ "$tempdir/defaults_custom_format.dump",
+ ],
expected => $supports_gzip
? qr/Compression: gzip/
: qr/Compression: none/,
defaults_dir_format => {
test_key => 'defaults',
dump_cmd => [
- 'pg_dump', '-Fd',
- "--file=$tempdir/defaults_dir_format", 'postgres',
+ 'pg_dump',
+ '--format' => 'directory',
+ '--file' => "$tempdir/defaults_dir_format",
+ 'postgres',
],
restore_cmd => [
- 'pg_restore', '-Fd',
- "--file=$tempdir/defaults_dir_format.sql",
+ 'pg_restore',
+ '--format' => 'directory',
+ '--file' => "$tempdir/defaults_dir_format.sql",
"$tempdir/defaults_dir_format",
],
command_like => {
command =>
- [ 'pg_restore', '-l', "$tempdir/defaults_dir_format", ],
+ [ 'pg_restore', '--list', "$tempdir/defaults_dir_format", ],
expected => $supports_gzip ? qr/Compression: gzip/
: qr/Compression: none/,
name => 'data content is gzip-compressed by default',
defaults_parallel => {
test_key => 'defaults',
dump_cmd => [
- 'pg_dump', '-Fd', '-j2', "--file=$tempdir/defaults_parallel",
+ 'pg_dump',
+ '--format' => 'directory',
+ '--jobs' => 2,
+ '--file' => "$tempdir/defaults_parallel",
'postgres',
],
restore_cmd => [
'pg_restore',
- "--file=$tempdir/defaults_parallel.sql",
+ '--file' => "$tempdir/defaults_parallel.sql",
"$tempdir/defaults_parallel",
],
},
defaults_tar_format => {
test_key => 'defaults',
dump_cmd => [
- 'pg_dump', '-Ft',
- "--file=$tempdir/defaults_tar_format.tar", 'postgres',
+ 'pg_dump',
+ '--format' => 'tar',
+ '--file' => "$tempdir/defaults_tar_format.tar",
+ 'postgres',
],
restore_cmd => [
'pg_restore',
- '--format=tar',
- "--file=$tempdir/defaults_tar_format.sql",
+ '--format' => 'tar',
+ '--file' => "$tempdir/defaults_tar_format.sql",
"$tempdir/defaults_tar_format.tar",
],
},
exclude_dump_test_schema => {
dump_cmd => [
'pg_dump', '--no-sync',
- "--file=$tempdir/exclude_dump_test_schema.sql",
- '--exclude-schema=dump_test', 'postgres',
+ '--file' => "$tempdir/exclude_dump_test_schema.sql",
+ '--exclude-schema' => 'dump_test',
+ 'postgres',
],
},
exclude_test_table => {
dump_cmd => [
'pg_dump', '--no-sync',
- "--file=$tempdir/exclude_test_table.sql",
- '--exclude-table=dump_test.test_table', 'postgres',
+ '--file' => "$tempdir/exclude_test_table.sql",
+ '--exclude-table' => 'dump_test.test_table',
+ 'postgres',
],
},
exclude_measurement => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/exclude_measurement.sql",
- '--exclude-table-and-children=dump_test.measurement',
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/exclude_measurement.sql",
+ '--exclude-table-and-children' => 'dump_test.measurement',
'postgres',
],
},
exclude_measurement_data => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/exclude_measurement_data.sql",
- '--exclude-table-data-and-children=dump_test.measurement',
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/exclude_measurement_data.sql",
+ '--exclude-table-data-and-children' => 'dump_test.measurement',
'--no-unlogged-table-data',
'postgres',
],
},
exclude_test_table_data => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/exclude_test_table_data.sql",
- '--exclude-table-data=dump_test.test_table',
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/exclude_test_table_data.sql",
+ '--exclude-table-data' => 'dump_test.test_table',
'--no-unlogged-table-data',
'postgres',
],
inserts => {
dump_cmd => [
'pg_dump', '--no-sync',
- "--file=$tempdir/inserts.sql", '-a',
+ '--file' => "$tempdir/inserts.sql",
+ '--data-only',
'--inserts', 'postgres',
],
},
pg_dumpall_globals => {
dump_cmd => [
- 'pg_dumpall', '-v', "--file=$tempdir/pg_dumpall_globals.sql",
- '-g', '--no-sync',
+ 'pg_dumpall',
+ '--verbose',
+ '--file' => "$tempdir/pg_dumpall_globals.sql",
+ '--globals-only',
+ '--no-sync',
],
},
pg_dumpall_globals_clean => {
dump_cmd => [
- 'pg_dumpall', "--file=$tempdir/pg_dumpall_globals_clean.sql",
- '-g', '-c', '--no-sync',
+ 'pg_dumpall',
+ '--file' => "$tempdir/pg_dumpall_globals_clean.sql",
+ '--globals-only',
+ '--clean',
+ '--no-sync',
],
},
pg_dumpall_dbprivs => {
dump_cmd => [
'pg_dumpall', '--no-sync',
- "--file=$tempdir/pg_dumpall_dbprivs.sql",
+ '--file' => "$tempdir/pg_dumpall_dbprivs.sql",
],
},
pg_dumpall_exclude => {
dump_cmd => [
- 'pg_dumpall', '-v', "--file=$tempdir/pg_dumpall_exclude.sql",
- '--exclude-database', '*dump_test*', '--no-sync',
+ 'pg_dumpall',
+ '--verbose',
+ '--file' => "$tempdir/pg_dumpall_exclude.sql",
+ '--exclude-database' => '*dump_test*',
+ '--no-sync',
],
},
no_toast_compression => {
dump_cmd => [
'pg_dump', '--no-sync',
- "--file=$tempdir/no_toast_compression.sql",
- '--no-toast-compression', 'postgres',
+ '--file' => "$tempdir/no_toast_compression.sql",
+ '--no-toast-compression',
+ 'postgres',
],
},
no_large_objects => {
dump_cmd => [
- 'pg_dump', '--no-sync', "--file=$tempdir/no_large_objects.sql",
- '-B', 'postgres',
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/no_large_objects.sql",
+ '--no-large-objects',
+ 'postgres',
],
},
no_privs => {
dump_cmd => [
'pg_dump', '--no-sync',
- "--file=$tempdir/no_privs.sql", '-x',
+ '--file' => "$tempdir/no_privs.sql",
+ '--no-privileges',
'postgres',
],
},
no_owner => {
dump_cmd => [
'pg_dump', '--no-sync',
- "--file=$tempdir/no_owner.sql", '-O',
+ '--file' => "$tempdir/no_owner.sql",
+ '--no-owner',
'postgres',
],
},
no_table_access_method => {
dump_cmd => [
'pg_dump', '--no-sync',
- "--file=$tempdir/no_table_access_method.sql",
- '--no-table-access-method', 'postgres',
+ '--file' => "$tempdir/no_table_access_method.sql",
+ '--no-table-access-method',
+ 'postgres',
],
},
only_dump_test_schema => {
dump_cmd => [
'pg_dump', '--no-sync',
- "--file=$tempdir/only_dump_test_schema.sql",
- '--schema=dump_test', 'postgres',
+ '--file' => "$tempdir/only_dump_test_schema.sql",
+ '--schema' => 'dump_test',
+ 'postgres',
],
},
only_dump_test_table => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/only_dump_test_table.sql",
- '--table=dump_test.test_table',
- '--lock-wait-timeout='
- . (1000 * $PostgreSQL::Test::Utils::timeout_default),
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/only_dump_test_table.sql",
+ '--table' => 'dump_test.test_table',
+ '--lock-wait-timeout' =>
+ (1000 * $PostgreSQL::Test::Utils::timeout_default),
'postgres',
],
},
only_dump_measurement => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/only_dump_measurement.sql",
- '--table-and-children=dump_test.measurement',
- '--lock-wait-timeout='
- . (1000 * $PostgreSQL::Test::Utils::timeout_default),
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/only_dump_measurement.sql",
+ '--table-and-children' => 'dump_test.measurement',
+ '--lock-wait-timeout' =>
+ (1000 * $PostgreSQL::Test::Utils::timeout_default),
'postgres',
],
},
role => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/role.sql",
- '--role=regress_dump_test_role',
- '--schema=dump_test_second_schema',
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/role.sql",
+ '--role' => 'regress_dump_test_role',
+ '--schema' => 'dump_test_second_schema',
'postgres',
],
},
role_parallel => {
test_key => 'role',
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- '--format=directory',
- '--jobs=2',
- "--file=$tempdir/role_parallel",
- '--role=regress_dump_test_role',
- '--schema=dump_test_second_schema',
+ 'pg_dump', '--no-sync',
+ '--format' => 'directory',
+			'--jobs' => 2,
+ '--file' => "$tempdir/role_parallel",
+ '--role' => 'regress_dump_test_role',
+ '--schema' => 'dump_test_second_schema',
'postgres',
],
restore_cmd => [
- 'pg_restore', "--file=$tempdir/role_parallel.sql",
+ 'pg_restore',
+ '--file' => "$tempdir/role_parallel.sql",
"$tempdir/role_parallel",
],
},
rows_per_insert => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/rows_per_insert.sql",
- '-a',
- '--rows-per-insert=4',
- '--table=dump_test.test_table',
- '--table=dump_test.test_fourth_table',
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/rows_per_insert.sql",
+ '--data-only',
+ '--rows-per-insert' => '4',
+ '--table' => 'dump_test.test_table',
+ '--table' => 'dump_test.test_fourth_table',
'postgres',
],
},
schema_only => {
dump_cmd => [
- 'pg_dump', '--format=plain',
- "--file=$tempdir/schema_only.sql", '--no-sync',
- '-s', 'postgres',
+ 'pg_dump', '--no-sync',
+ '--format' => 'plain',
+ '--file' => "$tempdir/schema_only.sql",
+ '--schema-only',
+ 'postgres',
],
},
section_pre_data => {
dump_cmd => [
- 'pg_dump', "--file=$tempdir/section_pre_data.sql",
- '--section=pre-data', '--no-sync',
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/section_pre_data.sql",
+ '--section' => 'pre-data',
'postgres',
],
},
section_data => {
dump_cmd => [
- 'pg_dump', "--file=$tempdir/section_data.sql",
- '--section=data', '--no-sync',
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/section_data.sql",
+ '--section' => 'data',
'postgres',
],
},
section_post_data => {
dump_cmd => [
- 'pg_dump', "--file=$tempdir/section_post_data.sql",
- '--section=post-data', '--no-sync', 'postgres',
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/section_post_data.sql",
+ '--section' => 'post-data',
+ 'postgres',
],
},
test_schema_plus_large_objects => {
dump_cmd => [
- 'pg_dump', "--file=$tempdir/test_schema_plus_large_objects.sql",
-
- '--schema=dump_test', '-b', '-B', '--no-sync', 'postgres',
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/test_schema_plus_large_objects.sql",
+ '--schema' => 'dump_test',
+ '--large-objects',
+ '--no-large-objects',
+ 'postgres',
],
},);
# Test connecting to a non-existent database
command_fails_like(
- [ 'pg_dump', '-p', "$port", 'qqq' ],
+ [ 'pg_dump', '--port' => $port, 'qqq' ],
qr/pg_dump: error: connection to server .* failed: FATAL: database "qqq" does not exist/,
'connecting to a non-existent database');
# Test connecting to an invalid database
$node->command_fails_like(
- [ 'pg_dump', '-d', 'regression_invalid' ],
+ [ 'pg_dump', '--dbname' => 'regression_invalid' ],
qr/pg_dump: error: connection to server .* failed: FATAL: cannot connect to invalid database "regression_invalid"/,
'connecting to an invalid database');
# Test connecting with an unprivileged user
command_fails_like(
- [ 'pg_dump', '-p', "$port", '--role=regress_dump_test_role' ],
+ [ 'pg_dump', '--port' => $port, '--role' => 'regress_dump_test_role' ],
qr/\Qpg_dump: error: query failed: ERROR: permission denied for\E/,
'connecting with an unprivileged user');
# Test dumping a non-existent schema, table, and patterns with --strict-names
command_fails_like(
- [ 'pg_dump', '-p', "$port", '-n', 'nonexistent' ],
+ [ 'pg_dump', '--port' => $port, '--schema' => 'nonexistent' ],
qr/\Qpg_dump: error: no matching schemas were found\E/,
'dumping a non-existent schema');
command_fails_like(
- [ 'pg_dump', '-p', "$port", '-t', 'nonexistent' ],
+ [ 'pg_dump', '--port' => $port, '--table' => 'nonexistent' ],
qr/\Qpg_dump: error: no matching tables were found\E/,
'dumping a non-existent table');
command_fails_like(
- [ 'pg_dump', '-p', "$port", '--strict-names', '-n', 'nonexistent*' ],
+ [
+ 'pg_dump',
+ '--port' => $port,
+ '--strict-names',
+ '--schema' => 'nonexistent*'
+ ],
qr/\Qpg_dump: error: no matching schemas were found for pattern\E/,
'no matching schemas');
command_fails_like(
- [ 'pg_dump', '-p', "$port", '--strict-names', '-t', 'nonexistent*' ],
+ [
+ 'pg_dump',
+ '--port' => $port,
+ '--strict-names',
+ '--table' => 'nonexistent*'
+ ],
qr/\Qpg_dump: error: no matching tables were found for pattern\E/,
'no matching tables');
# Test invalid multipart database names
$node->command_fails_like(
- [ 'pg_dumpall', '--exclude-database', '.' ],
+ [ 'pg_dumpall', '--exclude-database' => '.' ],
qr/pg_dumpall: error: improper qualified name \(too many dotted names\): \./,
'pg_dumpall: option --exclude-database rejects multipart pattern "."');
$node->command_fails_like(
- [ 'pg_dumpall', '--exclude-database', 'myhost.mydb' ],
+ [ 'pg_dumpall', '--exclude-database' => 'myhost.mydb' ],
qr/pg_dumpall: error: improper qualified name \(too many dotted names\): myhost\.mydb/,
'pg_dumpall: option --exclude-database rejects multipart database names');
##############################################################
# Test dumping pg_catalog (for research -- cannot be reloaded)
-$node->command_ok([ 'pg_dump', '-p', "$port", '-n', 'pg_catalog' ],
+$node->command_ok(
+ [ 'pg_dump', '--port' => $port, '--schema' => 'pg_catalog' ],
'pg_dump: option -n pg_catalog');
#########################################
# Test valid database exclusion patterns
$node->command_ok(
- [ 'pg_dumpall', '-p', "$port", '--exclude-database', '"myhost.mydb"' ],
+ [
+ 'pg_dumpall',
+ '--port' => $port,
+ '--exclude-database' => '"myhost.mydb"'
+ ],
'pg_dumpall: option --exclude-database handles database names with embedded dots'
);
# Test invalid multipart schema names
$node->command_fails_like(
- [ 'pg_dump', '--schema', 'myhost.mydb.myschema' ],
+ [ 'pg_dump', '--schema' => 'myhost.mydb.myschema' ],
qr/pg_dump: error: improper qualified name \(too many dotted names\): myhost\.mydb\.myschema/,
'pg_dump: option --schema rejects three-part schema names');
$node->command_fails_like(
- [ 'pg_dump', '--schema', 'otherdb.myschema' ],
+ [ 'pg_dump', '--schema' => 'otherdb.myschema' ],
qr/pg_dump: error: cross-database references are not implemented: otherdb\.myschema/,
'pg_dump: option --schema rejects cross-database multipart schema names');
$node->command_fails_like(
- [ 'pg_dump', '--schema', '.' ],
+ [ 'pg_dump', '--schema' => '.' ],
qr/pg_dump: error: cross-database references are not implemented: \./,
'pg_dump: option --schema rejects degenerate two-part schema name: "."');
$node->command_fails_like(
- [ 'pg_dump', '--schema', '"some.other.db".myschema' ],
+ [ 'pg_dump', '--schema' => '"some.other.db".myschema' ],
qr/pg_dump: error: cross-database references are not implemented: "some\.other\.db"\.myschema/,
'pg_dump: option --schema rejects cross-database multipart schema names with embedded dots'
);
$node->command_fails_like(
- [ 'pg_dump', '--schema', '..' ],
+ [ 'pg_dump', '--schema' => '..' ],
qr/pg_dump: error: improper qualified name \(too many dotted names\): \.\./,
'pg_dump: option --schema rejects degenerate three-part schema name: ".."'
);
# Test invalid multipart relation names
$node->command_fails_like(
- [ 'pg_dump', '--table', 'myhost.mydb.myschema.mytable' ],
+ [ 'pg_dump', '--table' => 'myhost.mydb.myschema.mytable' ],
qr/pg_dump: error: improper relation name \(too many dotted names\): myhost\.mydb\.myschema\.mytable/,
'pg_dump: option --table rejects four-part table names');
$node->command_fails_like(
- [ 'pg_dump', '--table', 'otherdb.pg_catalog.pg_class' ],
+ [ 'pg_dump', '--table' => 'otherdb.pg_catalog.pg_class' ],
qr/pg_dump: error: cross-database references are not implemented: otherdb\.pg_catalog\.pg_class/,
'pg_dump: option --table rejects cross-database three part table names');
command_fails_like(
[
- 'pg_dump', '-p', "$port", '--table',
- '"some.other.db".pg_catalog.pg_class'
+ 'pg_dump',
+ '--port' => $port,
+ '--table' => '"some.other.db".pg_catalog.pg_class'
],
qr/pg_dump: error: cross-database references are not implemented: "some\.other\.db"\.pg_catalog\.pg_class/,
'pg_dump: option --table rejects cross-database three part table names with embedded dots'
$node->safe_psql('postgres', "CREATE FOREIGN TABLE t1 (a int) SERVER s1");
command_fails_like(
- [ "pg_dump", '-p', $port, '--include-foreign-data=s0', 'postgres' ],
+ [
+ "pg_dump",
+ '--port' => $port,
+ '--include-foreign-data' => 's0',
+ 'postgres'
+ ],
qr/foreign-data wrapper \"dummy\" has no handler\r?\npg_dump: detail: Query was: .*t0/,
"correctly fails to dump a foreign table from a dummy FDW");
command_ok(
- [ "pg_dump", '-p', $port, '-a', '--include-foreign-data=s2', 'postgres' ],
+ [
+ "pg_dump",
+ '--port' => $port,
+ '--data-only',
+ '--include-foreign-data' => 's2',
+ 'postgres'
+ ],
"dump foreign server with no tables");
done_testing();
$node->command_ok(
[
- 'pg_dump', '-Fd', '--no-sync', '-j2', '-f', "$backupdir/dump1",
- $node->connstr($dbname1)
+ 'pg_dump',
+ '--format' => 'directory',
+ '--no-sync',
+ '--jobs' => 2,
+ '--file' => "$backupdir/dump1",
+ $node->connstr($dbname1),
],
'parallel dump');
$node->command_ok(
[
- 'pg_restore', '-v',
- '-d', $node->connstr($dbname2),
- '-j3', "$backupdir/dump1"
+ 'pg_restore', '--verbose',
+ '--dbname' => $node->connstr($dbname2),
+ '--jobs' => 3,
+ "$backupdir/dump1",
],
'parallel restore');
$node->command_ok(
[
- 'pg_dump', '-Fd',
- '--no-sync', '-j2',
- '-f', "$backupdir/dump2",
- '--inserts', $node->connstr($dbname1)
+ 'pg_dump',
+ '--format' => 'directory',
+ '--no-sync',
+ '--jobs' => 2,
+ '--file' => "$backupdir/dump2",
+ '--inserts',
+ $node->connstr($dbname1),
],
'parallel dump as inserts');
$node->command_ok(
[
- 'pg_restore', '-v',
- '-d', $node->connstr($dbname3),
- '-j3', "$backupdir/dump2"
+ 'pg_restore', '--verbose',
+ '--dbname' => $node->connstr($dbname3),
+ '--jobs' => 3,
+ "$backupdir/dump2",
],
'parallel restore as inserts');
command_ok(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ 'postgres'
],
"filter file without patterns");
command_ok(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ 'postgres'
],
"dump tables with filter patterns as well as comments and whitespace");
command_ok(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ 'postgres'
],
"filter file without patterns");
command_ok(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ 'postgres'
],
"dump tables with exclusion of a single table");
command_ok(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ 'postgres'
],
"dump tables with wildcard in pattern");
command_ok(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ 'postgres'
],
"dump tables with multiline names requiring quoting");
command_ok(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ 'postgres'
],
"dump tables with filter");
command_ok(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ 'postgres'
],
"exclude the public schema");
command_ok(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt",
- "--filter=$tempdir/inputfile2.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ '--filter' => "$tempdir/inputfile2.txt",
+ 'postgres'
],
"exclude the public schema with multiple filters");
command_ok(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ 'postgres'
],
"dump tables with filter");
command_ok(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ 'postgres'
],
"dump tables with filter");
command_fails_like(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ 'postgres'
],
qr/pg_dump: error: no matching foreign servers were found for pattern/,
"dump nonexisting foreign server");
command_ok(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ 'postgres'
],
"dump foreign_data with filter");
command_fails_like(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ 'postgres'
],
qr/exclude filter for "foreign data" is not allowed/,
"erroneously exclude foreign server");
command_fails_like(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ 'postgres'
],
qr/invalid filter command/,
"invalid syntax: incorrect filter command");
command_fails_like(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ 'postgres'
],
qr/unsupported filter object type: "xxx"/,
"invalid syntax: invalid object type specified, should be table, schema, foreign_data or data"
command_fails_like(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ 'postgres'
],
qr/missing object name/,
"invalid syntax: missing object identifier pattern");
command_fails_like(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ 'postgres'
],
qr/no matching tables were found/,
"invalid syntax: extra content after object identifier pattern");
command_ok(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt",
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
'--strict-names', 'postgres'
],
"strict names with matching pattern");
command_fails_like(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt",
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
'--strict-names', 'postgres'
],
qr/no matching tables were found/,
command_ok(
[
- 'pg_dumpall', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt"
+ 'pg_dumpall',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt"
],
"dump tables with exclusion of a database");
# --globals-only with exclusions
command_fails_like(
[
- 'pg_dumpall', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt",
+ 'pg_dumpall',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
'--globals-only'
],
qr/\Qpg_dumpall: error: option --exclude-database cannot be used together with -g\/--globals-only\E/,
command_fails_like(
[
- 'pg_dumpall', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt"
+ 'pg_dumpall',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt"
],
qr/invalid filter command/,
"invalid syntax: incorrect filter command");
command_fails_like(
[
- 'pg_dumpall', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt"
+ 'pg_dumpall',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt"
],
qr/unsupported filter object type: "xxx"/,
"invalid syntax: exclusion of non-existing object type");
command_fails_like(
[
- 'pg_dumpall', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt"
+ 'pg_dumpall',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt"
],
qr/pg_dumpall: error: invalid format in filter/,
"invalid syntax: exclusion of unsupported object type");
command_ok(
[
- 'pg_dump', '-p', $port, '-f', "$tempdir/filter_test.dump",
- "-Fc", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => "$tempdir/filter_test.dump",
+ '--format' => 'custom',
+ 'postgres'
],
"dump all tables");
command_ok(
[
- 'pg_restore', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt",
- "-Fc", "$tempdir/filter_test.dump"
+ 'pg_restore',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ '--format' => 'custom',
+ "$tempdir/filter_test.dump"
],
"restore tables with filter");
command_fails_like(
[
- 'pg_restore', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt"
+ 'pg_restore',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt"
],
qr/include filter for "table data" is not allowed/,
"invalid syntax: inclusion of unallowed object");
command_fails_like(
[
- 'pg_restore', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt"
+ 'pg_restore',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt"
],
qr/include filter for "extension" is not allowed/,
"invalid syntax: inclusion of unallowed object");
command_fails_like(
[
- 'pg_restore', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt"
+ 'pg_restore',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt"
],
qr/exclude filter for "extension" is not allowed/,
"invalid syntax: exclusion of unallowed object");
command_fails_like(
[
- 'pg_restore', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt"
+ 'pg_restore',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt"
],
qr/exclude filter for "table data" is not allowed/,
"invalid syntax: exclusion of unallowed object");
command_ok(
[
- 'pg_dump', '-p', $port, '-f', "$tempdir/filter_test.dump",
- "-Fc", 'sourcedb'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => "$tempdir/filter_test.dump",
+ '--format' => 'custom',
+ 'sourcedb'
],
"dump all objects from sourcedb");
command_ok(
[
- 'pg_restore', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt",
- "-Fc", "$tempdir/filter_test.dump"
+ 'pg_restore',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ '--format' => 'custom',
+ "$tempdir/filter_test.dump"
],
"restore function with filter");
command_ok(
[
- 'pg_restore', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt",
- "-Fc", "$tempdir/filter_test.dump"
+ 'pg_restore',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ '--format' => 'custom',
+ "$tempdir/filter_test.dump"
],
"restore function with filter");
command_ok(
[
- 'pg_restore', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt",
- "-Fc", "$tempdir/filter_test.dump"
+ 'pg_restore',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ '--format' => 'custom',
+ "$tempdir/filter_test.dump"
],
"restore function with filter");
command_ok(
[
- 'pg_restore', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt",
- "-Fc", "$tempdir/filter_test.dump"
+ 'pg_restore',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ '--format' => 'custom',
+ "$tempdir/filter_test.dump"
],
"restore function with filter");
command_ok(
[
- 'pg_restore', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt",
- "-Fc", "$tempdir/filter_test.dump"
+ 'pg_restore',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ '--format' => 'custom',
+ "$tempdir/filter_test.dump"
],
"restore function with filter");
command_ok(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ 'postgres'
],
"filter file without patterns");
command_ok(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ 'postgres'
],
"filter file without patterns");
command_ok(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ 'postgres'
],
"filter file without patterns");
command_fails_like(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ 'postgres'
],
qr/pg_dump: error: no matching extensions were found/,
"dump nonexisting extension");
my $dst_bootstrap_super = 'boot';
my $node = PostgreSQL::Test::Cluster->new('main');
-$node->init(extra =>
- [ '-U', $src_bootstrap_super, '--locale=C', '--encoding=LATIN1' ]);
+$node->init(
+ extra => [
+ '--username' => $src_bootstrap_super,
+ '--locale' => 'C',
+ '--encoding' => 'LATIN1',
+ ]);
# prep pg_hba.conf and pg_ident.conf
$node->run_log(
[
- $ENV{PG_REGRESS}, '--config-auth',
- $node->data_dir, '--user',
- $src_bootstrap_super, '--create-role',
- "$username1,$username2,$username3,$username4"
+ $ENV{PG_REGRESS},
+ '--config-auth' => $node->data_dir,
+ '--user' => $src_bootstrap_super,
+ '--create-role' => "$username1,$username2,$username3,$username4",
]);
$node->start;
my $plain = "$backupdir/plain.sql";
my $dirfmt = "$backupdir/dirfmt";
-$node->run_log([ 'createdb', '-U', $src_bootstrap_super, $dbname1 ]);
$node->run_log(
- [ 'createuser', '-U', $src_bootstrap_super, '-s', $username1 ]);
-$node->run_log([ 'createdb', '-U', $src_bootstrap_super, $dbname2 ]);
+ [ 'createdb', '--username' => $src_bootstrap_super, $dbname1 ]);
+$node->run_log(
+ [
+ 'createuser',
+ '--username' => $src_bootstrap_super,
+ '--superuser',
+ $username1,
+ ]);
+$node->run_log(
+ [ 'createdb', '--username' => $src_bootstrap_super, $dbname2 ]);
$node->run_log(
- [ 'createuser', '-U', $src_bootstrap_super, '-s', $username2 ]);
-$node->run_log([ 'createdb', '-U', $src_bootstrap_super, $dbname3 ]);
+ [
+ 'createuser',
+ '--username' => $src_bootstrap_super,
+ '--superuser',
+ $username2,
+ ]);
$node->run_log(
- [ 'createuser', '-U', $src_bootstrap_super, '-s', $username3 ]);
-$node->run_log([ 'createdb', '-U', $src_bootstrap_super, $dbname4 ]);
+ [ 'createdb', '--username' => $src_bootstrap_super, $dbname3 ]);
$node->run_log(
- [ 'createuser', '-U', $src_bootstrap_super, '-s', $username4 ]);
+ [
+ 'createuser',
+ '--username' => $src_bootstrap_super,
+ '--superuser',
+ $username3,
+ ]);
+$node->run_log(
+ [ 'createdb', '--username' => $src_bootstrap_super, $dbname4 ]);
+$node->run_log(
+ [
+ 'createuser',
+ '--username' => $src_bootstrap_super,
+ '--superuser',
+ $username4,
+ ]);
-# For these tests, pg_dumpall -r is used because it produces a short
-# dump.
+# For these tests, pg_dumpall --roles-only is used because it produces
+# a short dump.
$node->command_ok(
[
- 'pg_dumpall', '-r', '-f', $discard, '--dbname',
- $node->connstr($dbname1),
- '-U', $username4
+ 'pg_dumpall', '--roles-only',
+ '--file' => $discard,
+ '--dbname' => $node->connstr($dbname1),
+ '--username' => $username4,
],
'pg_dumpall with long ASCII name 1');
$node->command_ok(
[
- 'pg_dumpall', '--no-sync', '-r', '-f', $discard, '--dbname',
- $node->connstr($dbname2),
- '-U', $username3
+ 'pg_dumpall', '--no-sync', '--roles-only',
+ '--file' => $discard,
+ '--dbname' => $node->connstr($dbname2),
+ '--username' => $username3,
],
'pg_dumpall with long ASCII name 2');
$node->command_ok(
[
- 'pg_dumpall', '--no-sync', '-r', '-f', $discard, '--dbname',
- $node->connstr($dbname3),
- '-U', $username2
+ 'pg_dumpall', '--no-sync', '--roles-only',
+ '--file' => $discard,
+ '--dbname' => $node->connstr($dbname3),
+ '--username' => $username2,
],
'pg_dumpall with long ASCII name 3');
$node->command_ok(
[
- 'pg_dumpall', '--no-sync', '-r', '-f', $discard, '--dbname',
- $node->connstr($dbname4),
- '-U', $username1
+ 'pg_dumpall', '--no-sync', '--roles-only',
+ '--file' => $discard,
+ '--dbname' => $node->connstr($dbname4),
+ '--username' => $username1,
],
'pg_dumpall with long ASCII name 4');
$node->command_ok(
[
- 'pg_dumpall', '-U',
- $src_bootstrap_super, '--no-sync',
- '-r', '-l',
- 'dbname=template1'
+ 'pg_dumpall', '--no-sync', '--roles-only',
+ '--username' => $src_bootstrap_super,
+ '--dbname' => 'dbname=template1',
],
- 'pg_dumpall -l accepts connection string');
+ 'pg_dumpall --dbname accepts connection string');
-$node->run_log([ 'createdb', '-U', $src_bootstrap_super, "foo\n\rbar" ]);
+$node->run_log(
+ [ 'createdb', '--username' => $src_bootstrap_super, "foo\n\rbar" ]);
-# not sufficient to use -r here
+# not sufficient to use --roles-only here
$node->command_fails(
- [ 'pg_dumpall', '-U', $src_bootstrap_super, '--no-sync', '-f', $discard ],
+ [
+ 'pg_dumpall', '--no-sync',
+ '--username' => $src_bootstrap_super,
+ '--file' => $discard,
+ ],
'pg_dumpall with \n\r in database name');
-$node->run_log([ 'dropdb', '-U', $src_bootstrap_super, "foo\n\rbar" ]);
+$node->run_log(
+ [ 'dropdb', '--username' => $src_bootstrap_super, "foo\n\rbar" ]);
# make a table, so the parallel worker has something to dump
$node->safe_psql(
$dbname1,
'CREATE TABLE t0()',
- extra_params => [ '-U', $src_bootstrap_super ]);
+ extra_params => [ '--username' => $src_bootstrap_super ]);
# XXX no printed message when this fails, just SIGPIPE termination
$node->command_ok(
[
- 'pg_dump', '-Fd', '--no-sync', '-j2', '-f', $dirfmt, '-U', $username1,
- $node->connstr($dbname1)
+ 'pg_dump',
+ '--format' => 'directory',
+ '--no-sync',
+ '--jobs' => 2,
+ '--file' => $dirfmt,
+ '--username' => $username1,
+ $node->connstr($dbname1),
],
'parallel dump');
# recreate $dbname1 for restore test
-$node->run_log([ 'dropdb', '-U', $src_bootstrap_super, $dbname1 ]);
-$node->run_log([ 'createdb', '-U', $src_bootstrap_super, $dbname1 ]);
+$node->run_log([ 'dropdb', '--username' => $src_bootstrap_super, $dbname1 ]);
+$node->run_log(
+ [ 'createdb', '--username' => $src_bootstrap_super, $dbname1 ]);
$node->command_ok(
[
- 'pg_restore', '-v', '-d', 'template1',
- '-j2', '-U', $username1, $dirfmt
+ 'pg_restore',
+ '--verbose',
+ '--dbname' => 'template1',
+ '--jobs' => 2,
+ '--username' => $username1,
+ $dirfmt,
],
'parallel restore');
-$node->run_log([ 'dropdb', '-U', $src_bootstrap_super, $dbname1 ]);
+$node->run_log([ 'dropdb', '--username' => $src_bootstrap_super, $dbname1 ]);
$node->command_ok(
[
- 'pg_restore', '-C', '-v', '-d',
- 'template1', '-j2', '-U', $username1,
- $dirfmt
+ 'pg_restore',
+ '--create',
+ '--verbose',
+ '--dbname' => 'template1',
+ '--jobs' => 2,
+ '--username' => $username1,
+ $dirfmt,
],
'parallel restore with create');
$node->command_ok(
- [ 'pg_dumpall', '--no-sync', '-f', $plain, '-U', $username1 ],
+ [
+ 'pg_dumpall',
+ '--no-sync',
+ '--file' => $plain,
+ '--username' => $username1,
+ ],
'take full dump');
system_log('cat', $plain);
my ($stderr, $result);
my $envar_node = PostgreSQL::Test::Cluster->new('destination_envar');
$envar_node->init(
- extra =>
- [ '-U', $dst_bootstrap_super, '--locale=C', '--encoding=LATIN1' ],
+ extra => [
+ '--username' => $dst_bootstrap_super,
+ '--locale' => 'C',
+ '--encoding' => 'LATIN1',
+ ],
auth_extra =>
- [ '--user', $dst_bootstrap_super, '--create-role', $restore_super ]);
+ [ '--user' => $dst_bootstrap_super, '--create-role' => $restore_super ],
+);
$envar_node->start;
# make superuser for restore
$envar_node->run_log(
- [ 'createuser', '-U', $dst_bootstrap_super, '-s', $restore_super ]);
+ [
+ 'createuser',
+ '--username' => $dst_bootstrap_super,
+ '--superuser', $restore_super,
+ ]);
{
local $ENV{PGPORT} = $envar_node->port;
local $ENV{PGUSER} = $restore_super;
- $result = run_log([ 'psql', '-X', '-f', $plain ], '2>', \$stderr);
+ $result = run_log([ 'psql', '--no-psqlrc', '--file' => $plain ],
+ '2>' => \$stderr);
}
ok($result,
'restore full dump using environment variables for connection parameters'
my $cmdline_node = PostgreSQL::Test::Cluster->new('destination_cmdline');
$cmdline_node->init(
- extra =>
- [ '-U', $dst_bootstrap_super, '--locale=C', '--encoding=LATIN1' ],
+ extra => [
+ '--username' => $dst_bootstrap_super,
+ '--locale' => 'C',
+ '--encoding' => 'LATIN1',
+ ],
auth_extra =>
- [ '--user', $dst_bootstrap_super, '--create-role', $restore_super ]);
+ [ '--user' => $dst_bootstrap_super, '--create-role' => $restore_super ],
+);
$cmdline_node->start;
$cmdline_node->run_log(
- [ 'createuser', '-U', $dst_bootstrap_super, '-s', $restore_super ]);
+ [
+ 'createuser',
+ '--username' => $dst_bootstrap_super,
+ '--superuser',
+ $restore_super,
+ ]);
{
$result = run_log(
[
- 'psql', '-p', $cmdline_node->port, '-U',
- $restore_super, '-X', '-f', $plain
+ 'psql',
+ '--port' => $cmdline_node->port,
+ '--username' => $restore_super,
+ '--no-psqlrc',
+ '--file' => $plain,
],
- '2>',
- \$stderr);
+ '2>' => \$stderr);
}
ok($result,
'restore full dump with command-line options for connection parameters');
'check PGDATA permissions');
}
-command_ok([ 'pg_resetwal', '-D', $node->data_dir ], 'pg_resetwal runs');
+command_ok([ 'pg_resetwal', '--pgdata' => $node->data_dir ],
+ 'pg_resetwal runs');
$node->start;
is($node->safe_psql("postgres", "SELECT 1;"),
1, 'server running and working after reset');
qr/database server was not shut down cleanly/,
'does not run after immediate shutdown');
command_ok(
- [ 'pg_resetwal', '-f', $node->data_dir ],
+ [ 'pg_resetwal', '--force', $node->data_dir ],
'runs after immediate shutdown with force');
$node->start;
is($node->safe_psql("postgres", "SELECT 1;"),
# error cases
# -c
command_fails_like(
- [ 'pg_resetwal', '-c', 'foo', $node->data_dir ],
+ [ 'pg_resetwal', '-c' => 'foo', $node->data_dir ],
qr/error: invalid argument for option -c/,
'fails with incorrect -c option');
command_fails_like(
- [ 'pg_resetwal', '-c', '10,bar', $node->data_dir ],
+ [ 'pg_resetwal', '-c' => '10,bar', $node->data_dir ],
qr/error: invalid argument for option -c/,
'fails with incorrect -c option part 2');
command_fails_like(
- [ 'pg_resetwal', '-c', '1,10', $node->data_dir ],
+ [ 'pg_resetwal', '-c' => '1,10', $node->data_dir ],
qr/greater than/,
- 'fails with -c value 1 part 1');
+	'fails with -c value 1 part 1');
command_fails_like(
- [ 'pg_resetwal', '-c', '10,1', $node->data_dir ],
+ [ 'pg_resetwal', '-c' => '10,1', $node->data_dir ],
qr/greater than/,
'fails with -c value 1 part 2');
# -e
command_fails_like(
- [ 'pg_resetwal', '-e', 'foo', $node->data_dir ],
+ [ 'pg_resetwal', '-e' => 'foo', $node->data_dir ],
qr/error: invalid argument for option -e/,
'fails with incorrect -e option');
command_fails_like(
- [ 'pg_resetwal', '-e', '-1', $node->data_dir ],
+ [ 'pg_resetwal', '-e' => '-1', $node->data_dir ],
qr/must not be -1/,
'fails with -e value -1');
# -l
command_fails_like(
- [ 'pg_resetwal', '-l', 'foo', $node->data_dir ],
+ [ 'pg_resetwal', '-l' => 'foo', $node->data_dir ],
qr/error: invalid argument for option -l/,
'fails with incorrect -l option');
# -m
command_fails_like(
- [ 'pg_resetwal', '-m', 'foo', $node->data_dir ],
+ [ 'pg_resetwal', '-m' => 'foo', $node->data_dir ],
qr/error: invalid argument for option -m/,
'fails with incorrect -m option');
command_fails_like(
- [ 'pg_resetwal', '-m', '10,bar', $node->data_dir ],
+ [ 'pg_resetwal', '-m' => '10,bar', $node->data_dir ],
qr/error: invalid argument for option -m/,
'fails with incorrect -m option part 2');
command_fails_like(
- [ 'pg_resetwal', '-m', '0,10', $node->data_dir ],
+ [ 'pg_resetwal', '-m' => '0,10', $node->data_dir ],
qr/must not be 0/,
'fails with -m value 0 part 1');
command_fails_like(
- [ 'pg_resetwal', '-m', '10,0', $node->data_dir ],
+ [ 'pg_resetwal', '-m' => '10,0', $node->data_dir ],
qr/must not be 0/,
'fails with -m value 0 part 2');
# -o
command_fails_like(
- [ 'pg_resetwal', '-o', 'foo', $node->data_dir ],
+ [ 'pg_resetwal', '-o' => 'foo', $node->data_dir ],
qr/error: invalid argument for option -o/,
'fails with incorrect -o option');
command_fails_like(
- [ 'pg_resetwal', '-o', '0', $node->data_dir ],
+ [ 'pg_resetwal', '-o' => '0', $node->data_dir ],
qr/must not be 0/,
'fails with -o value 0');
# -O
command_fails_like(
- [ 'pg_resetwal', '-O', 'foo', $node->data_dir ],
+ [ 'pg_resetwal', '-O' => 'foo', $node->data_dir ],
qr/error: invalid argument for option -O/,
'fails with incorrect -O option');
command_fails_like(
- [ 'pg_resetwal', '-O', '-1', $node->data_dir ],
+ [ 'pg_resetwal', '-O' => '-1', $node->data_dir ],
qr/must not be -1/,
'fails with -O value -1');
# --wal-segsize
command_fails_like(
- [ 'pg_resetwal', '--wal-segsize', 'foo', $node->data_dir ],
+ [ 'pg_resetwal', '--wal-segsize' => 'foo', $node->data_dir ],
qr/error: invalid value/,
'fails with incorrect --wal-segsize option');
command_fails_like(
- [ 'pg_resetwal', '--wal-segsize', '13', $node->data_dir ],
+ [ 'pg_resetwal', '--wal-segsize' => '13', $node->data_dir ],
qr/must be a power/,
'fails with invalid --wal-segsize value');
# -u
command_fails_like(
- [ 'pg_resetwal', '-u', 'foo', $node->data_dir ],
+ [ 'pg_resetwal', '-u' => 'foo', $node->data_dir ],
qr/error: invalid argument for option -u/,
'fails with incorrect -u option');
command_fails_like(
- [ 'pg_resetwal', '-u', '1', $node->data_dir ],
+ [ 'pg_resetwal', '-u' => '1', $node->data_dir ],
qr/must be greater than/,
'fails with -u value too small');
# -x
command_fails_like(
- [ 'pg_resetwal', '-x', 'foo', $node->data_dir ],
+ [ 'pg_resetwal', '-x' => 'foo', $node->data_dir ],
qr/error: invalid argument for option -x/,
'fails with incorrect -x option');
command_fails_like(
- [ 'pg_resetwal', '-x', '1', $node->data_dir ],
+ [ 'pg_resetwal', '-x' => '1', $node->data_dir ],
qr/must be greater than/,
'fails with -x value too small');
# run with control override options
-my $out = (run_command([ 'pg_resetwal', '-n', $node->data_dir ]))[0];
+my $out = (run_command([ 'pg_resetwal', '--dry-run', $node->data_dir ]))[0];
$out =~ /^Database block size: *(\d+)$/m or die;
my $blcksz = $1;
-my @cmd = ('pg_resetwal', '-D', $node->data_dir);
+my @cmd = ('pg_resetwal', '--pgdata' => $node->data_dir);
# some not-so-critical hardcoded values
-push @cmd, '-e', 1;
-push @cmd, '-l', '00000001000000320000004B';
-push @cmd, '-o', 100_000;
-push @cmd, '--wal-segsize', 1;
+push @cmd, '--epoch' => 1;
+push @cmd, '--next-wal-file' => '00000001000000320000004B';
+push @cmd, '--next-oid' => 100_000;
+push @cmd, '--wal-segsize' => 1;
# these use the guidance from the documentation
# XXX: Should there be a multiplier, similar to the other options?
-# -c argument is "old,new"
+# --commit-timestamp-ids argument is "old,new"
push @cmd,
- '-c',
+ '--commit-timestamp-ids' =>
sprintf("%d,%d", hex($files[0]) == 0 ? 3 : hex($files[0]), hex($files[-1]));
@files = get_slru_files('pg_multixact/offsets');
$mult = 32 * $blcksz / 4;
-# -m argument is "new,old"
-push @cmd, '-m',
- sprintf("%d,%d",
+# --multixact-ids argument is "new,old"
+push @cmd,
+ '--multixact-ids' => sprintf("%d,%d",
(hex($files[-1]) + 1) * $mult,
-	hex($files[0]) == 0 ? 1 : hex($files[0] * $mult));
+	hex($files[0]) == 0 ? 1 : hex($files[0]) * $mult);
@files = get_slru_files('pg_multixact/members');
$mult = 32 * int($blcksz / 20) * 4;
-push @cmd, '-O', (hex($files[-1]) + 1) * $mult;
+push @cmd, '--multixact-offset' => (hex($files[-1]) + 1) * $mult;
@files = get_slru_files('pg_xact');
$mult = 32 * $blcksz * 4;
push @cmd,
- '-u', (hex($files[0]) == 0 ? 3 : hex($files[0]) * $mult),
- '-x', ((hex($files[-1]) + 1) * $mult);
+ '--oldest-transaction-id' =>
+ (hex($files[0]) == 0 ? 3 : hex($files[0]) * $mult),
+ '--next-transaction-id' => ((hex($files[-1]) + 1) * $mult);
-command_ok([ @cmd, '-n' ], 'runs with control override options, dry run');
+command_ok([ @cmd, '--dry-run' ],
+ 'runs with control override options, dry run');
command_ok(\@cmd, 'runs with control override options');
command_like(
- [ 'pg_resetwal', '-n', $node->data_dir ],
+ [ 'pg_resetwal', '--dry-run', $node->data_dir ],
qr/^Latest checkpoint's NextOID: *100000$/m,
'spot check that control changes were applied');
close $fh;
command_checks_all(
- [ 'pg_resetwal', '-n', $node->data_dir ],
+ [ 'pg_resetwal', '--dry-run', $node->data_dir ],
0,
[qr/pg_control version number/],
[
close $fh;
command_checks_all(
- [ 'pg_resetwal', '-n', $node->data_dir ],
+ [ 'pg_resetwal', '--dry-run', $node->data_dir ],
0,
[qr/pg_control version number/],
[
command_fails(
[
'pg_rewind', '--debug',
- '--source-pgdata', $standby_pgdata,
- '--target-pgdata', $primary_pgdata,
+ '--source-pgdata' => $standby_pgdata,
+ '--target-pgdata' => $primary_pgdata,
'--no-sync'
],
'pg_rewind with running target');
command_fails(
[
'pg_rewind', '--debug',
- '--source-pgdata', $standby_pgdata,
- '--target-pgdata', $primary_pgdata,
+ '--source-pgdata' => $standby_pgdata,
+ '--target-pgdata' => $primary_pgdata,
'--no-sync', '--no-ensure-shutdown'
],
'pg_rewind --no-ensure-shutdown with running target');
command_fails(
[
'pg_rewind', '--debug',
- '--source-pgdata', $standby_pgdata,
- '--target-pgdata', $primary_pgdata,
+ '--source-pgdata' => $standby_pgdata,
+ '--target-pgdata' => $primary_pgdata,
'--no-sync', '--no-ensure-shutdown'
],
'pg_rewind with unexpected running source');
command_ok(
[
'pg_rewind', '--debug',
- '--source-pgdata', $standby_pgdata,
- '--target-pgdata', $primary_pgdata,
+ '--source-pgdata' => $standby_pgdata,
+ '--target-pgdata' => $primary_pgdata,
'--no-sync', '--dry-run'
],
'pg_rewind --dry-run');
my $standby_pgdata = PostgreSQL::Test::Utils::tempdir;
command_fails(
[
- 'pg_rewind', '--debug',
- '--target-pgdata', $primary_pgdata,
- '--source-pgdata', $standby_pgdata,
+ 'pg_rewind',
+ '--debug',
+ '--target-pgdata' => $primary_pgdata,
+ '--source-pgdata' => $standby_pgdata,
'extra_arg1'
],
'too many arguments');
-command_fails([ 'pg_rewind', '--target-pgdata', $primary_pgdata ],
+command_fails([ 'pg_rewind', '--target-pgdata' => $primary_pgdata ],
'no source specified');
command_fails(
[
- 'pg_rewind', '--debug',
- '--target-pgdata', $primary_pgdata,
- '--source-pgdata', $standby_pgdata,
- '--source-server', 'incorrect_source'
+ 'pg_rewind',
+ '--debug',
+ '--target-pgdata' => $primary_pgdata,
+ '--source-pgdata' => $standby_pgdata,
+ '--source-server' => 'incorrect_source'
],
'both remote and local sources specified');
command_fails(
[
- 'pg_rewind', '--debug',
- '--target-pgdata', $primary_pgdata,
- '--source-pgdata', $standby_pgdata,
+ 'pg_rewind',
+ '--debug',
+ '--target-pgdata' => $primary_pgdata,
+ '--source-pgdata' => $standby_pgdata,
'--write-recovery-conf'
],
'no local source with --write-recovery-conf');
# recovery configuration automatically.
command_ok(
[
- 'pg_rewind', "--debug",
- "--source-server", $node_b->connstr('postgres'),
- "--target-pgdata=$node_c_pgdata", "--no-sync",
- "--write-recovery-conf"
+ 'pg_rewind',
+ '--debug',
+ '--source-server' => $node_b->connstr('postgres'),
+ '--target-pgdata' => $node_c_pgdata,
+ '--no-sync',
+ '--write-recovery-conf',
],
'pg_rewind remote');
}
command_ok(
[
- 'pg_rewind', "--source-server=$node_1_connstr",
- "--target-pgdata=$node_2_pgdata", "--debug"
+ 'pg_rewind',
+ '--source-server' => $node_1_connstr,
+ '--target-pgdata' => $node_2_pgdata,
+ '--debug',
],
'run pg_rewind');
my $ret = run_log(
[
'pg_rewind', '--debug',
- '--source-pgdata', $standby_pgdata,
- '--target-pgdata', $primary_pgdata,
+ '--source-pgdata' => $standby_pgdata,
+ '--target-pgdata' => $primary_pgdata,
'--no-sync',
],
'2>>',
my ($stdout, $stderr) = run_command(
[
'pg_rewind', '--debug',
- '--source-pgdata', $node_standby->data_dir,
- '--target-pgdata', $node_primary->data_dir,
+ '--source-pgdata' => $node_standby->data_dir,
+ '--target-pgdata' => $node_primary->data_dir,
'--no-sync',
]);
command_ok(
[
'pg_rewind',
- "--debug",
- "--source-pgdata=$standby_pgdata",
- "--target-pgdata=$primary_pgdata",
- "--no-sync",
- "--config-file",
- "$tmp_folder/primary-postgresql.conf.tmp"
+ '--debug',
+ '--source-pgdata' => $standby_pgdata,
+ '--target-pgdata' => $primary_pgdata,
+ '--no-sync',
+ '--config-file' => "$tmp_folder/primary-postgresql.conf.tmp",
],
'pg_rewind local');
}
# recovery configuration automatically.
command_ok(
[
- 'pg_rewind', "--debug",
- "--source-server", $standby_connstr,
- "--target-pgdata=$primary_pgdata", "--no-sync",
- "--write-recovery-conf", "--config-file",
- "$tmp_folder/primary-postgresql.conf.tmp"
+ 'pg_rewind',
+ '--debug',
+ '--source-server' => $standby_connstr,
+ '--target-pgdata' => $primary_pgdata,
+ '--no-sync',
+ '--write-recovery-conf',
+ '--config-file' => "$tmp_folder/primary-postgresql.conf.tmp",
],
'pg_rewind remote');
command_ok(
[
'pg_rewind',
- "--debug",
- "--source-pgdata=$standby_pgdata",
- "--target-pgdata=$primary_pgdata",
- "--no-sync",
- "--no-ensure-shutdown",
- "--restore-target-wal",
- "--config-file",
- "$primary_pgdata/postgresql.conf"
+ '--debug',
+ '--source-pgdata' => $standby_pgdata,
+ '--target-pgdata' => $primary_pgdata,
+ '--no-sync',
+ '--no-ensure-shutdown',
+ '--restore-target-wal',
+ '--config-file' => "$primary_pgdata/postgresql.conf",
],
'pg_rewind archive');
}
# Test invalid option combinations
command_fails_like(
- [ 'pg_test_fsync', '--secs-per-test', 'a' ],
+ [ 'pg_test_fsync', '--secs-per-test' => 'a' ],
qr/\Qpg_test_fsync: error: invalid argument for option --secs-per-test\E/,
'pg_test_fsync: invalid argument for option --secs-per-test');
command_fails_like(
- [ 'pg_test_fsync', '--secs-per-test', '0' ],
+ [ 'pg_test_fsync', '--secs-per-test' => '0' ],
qr/\Qpg_test_fsync: error: --secs-per-test must be in range 1..4294967295\E/,
'pg_test_fsync: --secs-per-test must be in range');
# Test invalid option combinations
command_fails_like(
- [ 'pg_test_timing', '--duration', 'a' ],
+ [ 'pg_test_timing', '--duration' => 'a' ],
qr/\Qpg_test_timing: invalid argument for option --duration\E/,
'pg_test_timing: invalid argument for option --duration');
command_fails_like(
- [ 'pg_test_timing', '--duration', '0' ],
+ [ 'pg_test_timing', '--duration' => '0' ],
qr/\Qpg_test_timing: --duration must be in range 1..4294967295\E/,
'pg_test_timing: --duration must be in range');
# max_replication_slots.
command_checks_all(
[
- 'pg_upgrade', '--no-sync', '-d', $old_sub->data_dir,
- '-D', $new_sub->data_dir, '-b', $oldbindir,
- '-B', $newbindir, '-s', $new_sub->host,
- '-p', $old_sub->port, '-P', $new_sub->port,
- $mode, '--check',
+ 'pg_upgrade',
+ '--no-sync',
+ '--old-datadir' => $old_sub->data_dir,
+ '--new-datadir' => $new_sub->data_dir,
+ '--old-bindir' => $oldbindir,
+ '--new-bindir' => $newbindir,
+ '--socketdir' => $new_sub->host,
+ '--old-port' => $old_sub->port,
+ '--new-port' => $new_sub->port,
+ $mode,
+ '--check',
],
1,
[
command_fails(
[
- 'pg_upgrade', '--no-sync', '-d', $old_sub->data_dir,
- '-D', $new_sub->data_dir, '-b', $oldbindir,
- '-B', $newbindir, '-s', $new_sub->host,
- '-p', $old_sub->port, '-P', $new_sub->port,
- $mode, '--check',
+ 'pg_upgrade',
+ '--no-sync',
+ '--old-datadir' => $old_sub->data_dir,
+ '--new-datadir' => $new_sub->data_dir,
+ '--old-bindir' => $oldbindir,
+ '--new-bindir' => $newbindir,
+ '--socketdir' => $new_sub->host,
+ '--old-port' => $old_sub->port,
+ '--new-port' => $new_sub->port,
+ $mode,
+ '--check',
],
'run of pg_upgrade --check for old instance with relation in \'d\' datasync(invalid) state and missing replication origin'
);
# ------------------------------------------------------
command_ok(
[
- 'pg_upgrade', '--no-sync', '-d', $old_sub->data_dir,
- '-D', $new_sub->data_dir, '-b', $oldbindir,
- '-B', $newbindir, '-s', $new_sub->host,
- '-p', $old_sub->port, '-P', $new_sub->port,
+ 'pg_upgrade',
+ '--no-sync',
+ '--old-datadir' => $old_sub->data_dir,
+ '--new-datadir' => $new_sub->data_dir,
+ '--old-bindir' => $oldbindir,
+ '--new-bindir' => $newbindir,
+ '--socketdir' => $new_sub->host,
+ '--old-port' => $old_sub->port,
+ '--new-port' => $new_sub->port,
$mode
],
'run of pg_upgrade for old instance when the subscription tables are in init/ready state'
# but then try to use an alternate, nonexisting manifest
command_fails_like(
- [ 'pg_verifybackup', '-m', "$tempdir/not_the_manifest", $tempdir ],
+ [
+ 'pg_verifybackup',
+ '--manifest-path' => "$tempdir/not_the_manifest",
+ $tempdir,
+ ],
qr/could not open file.*\/not_the_manifest\"/,
-	'pg_verifybackup respects -m flag');
+	'pg_verifybackup respects --manifest-path flag');
local $ENV{MSYS2_ARG_CONV_EXCL} = $source_ts_prefix;
$primary->command_ok(
[
- 'pg_basebackup', '-D', $backup_path, '--no-sync', '-cfast',
- '-T', "${source_ts_path}=${backup_ts_path}"
+ 'pg_basebackup',
+ '--pgdata' => $backup_path,
+ '--no-sync',
+ '--checkpoint' => 'fast',
+ '--tablespace-mapping' =>
+ "${source_ts_path}=${backup_ts_path}",
],
"base backup ok");
command_ok([ 'pg_verifybackup', $backup_path ],
$primary->start;
my $backup_path = $primary->backup_dir . '/test_options';
$primary->command_ok(
- [ 'pg_basebackup', '-D', $backup_path, '--no-sync', '-cfast' ],
+ [
+ 'pg_basebackup',
+ '--pgdata' => $backup_path,
+ '--no-sync',
+ '--checkpoint' => 'fast'
+ ],
"base backup ok");
-# Verify that pg_verifybackup -q succeeds and produces no output.
+# Verify that pg_verifybackup --quiet succeeds and produces no output.
my $stdout;
my $stderr;
-my $result = IPC::Run::run [ 'pg_verifybackup', '-q', $backup_path ],
- '>', \$stdout, '2>', \$stderr;
-ok($result, "-q succeeds: exit code 0");
-is($stdout, '', "-q succeeds: no stdout");
-is($stderr, '', "-q succeeds: no stderr");
-
-# Should still work if we specify -Fp.
-$primary->command_ok([ 'pg_verifybackup', '-Fp', $backup_path ],
- "verifies with -Fp");
+my $result = IPC::Run::run [ 'pg_verifybackup', '--quiet', $backup_path ],
+ '>' => \$stdout,
+ '2>' => \$stderr;
+ok($result, "--quiet succeeds: exit code 0");
+is($stdout, '', "--quiet succeeds: no stdout");
+is($stderr, '', "--quiet succeeds: no stderr");
+
+# Should still work if we specify --format=plain.
+$primary->command_ok(
+ [ 'pg_verifybackup', '--format' => 'plain', $backup_path ],
+ "verifies with --format=plain");
-# Should not work if we specify -Fy because that's invalid.
+# Should not work if we specify --format=y because that's invalid.
$primary->command_fails_like(
- [ 'pg_verifybackup', '-Fy', $backup_path ],
+ [ 'pg_verifybackup', '--format' => 'y', $backup_path ],
qr(invalid backup format "y", must be "plain" or "tar"),
- "does not verify with -Fy");
+ "does not verify with --format=y");
# Should produce a lengthy list of errors; we test for just one of those.
$primary->command_fails_like(
- [ 'pg_verifybackup', '-Ft', '-n', $backup_path ],
+ [
+ 'pg_verifybackup',
+ '--format' => 'tar',
+ '--no-parse-wal',
+ $backup_path
+ ],
qr("pg_multixact" is not a plain file),
- "does not verify with -Ft -n");
+ "does not verify with --format=tar --no-parse-wal");
# Test invalid options
command_fails_like(
-# Verify that pg_verifybackup -q now fails.
+# Verify that pg_verifybackup --quiet now fails.
command_fails_like(
- [ 'pg_verifybackup', '-q', $backup_path ],
+ [ 'pg_verifybackup', '--quiet', $backup_path ],
qr/checksum mismatch for file \"PG_VERSION\"/,
- '-q checksum mismatch');
+ '--quiet checksum mismatch');
# Since we didn't change the length of the file, verification should succeed
# if we ignore checksums. Check that we get the right message, too.
command_like(
- [ 'pg_verifybackup', '-s', $backup_path ],
+ [ 'pg_verifybackup', '--skip-checksums', $backup_path ],
qr/backup successfully verified/,
- '-s skips checksumming');
+ '--skip-checksums skips checksumming');
# Validation should succeed if we ignore the problem file. Also, check
# the progress information.
command_checks_all(
- [ 'pg_verifybackup', '--progress', '-i', 'PG_VERSION', $backup_path ],
+ [
+ 'pg_verifybackup',
+ '--progress',
+ '--ignore' => 'PG_VERSION',
+ $backup_path
+ ],
0,
[qr/backup successfully verified/],
[qr{(\d+/\d+ kB \(\d+%\) verified)+}],
- '-i ignores problem file');
+ '--ignore ignores problem file');
# PG_VERSION is already corrupt; let's try also removing all of pg_xact.
rmtree($backup_path . "/pg_xact");
# We're ignoring the problem with PG_VERSION, but not the problem with
# pg_xact, so verification should fail here.
command_fails_like(
- [ 'pg_verifybackup', '-i', 'PG_VERSION', $backup_path ],
+ [ 'pg_verifybackup', '--ignore' => 'PG_VERSION', $backup_path ],
qr/pg_xact.*is present in the manifest but not on disk/,
- '-i does not ignore all problems');
+ '--ignore does not ignore all problems');
-# If we use -i twice, we should be able to ignore all of the problems.
+# If we use --ignore twice, we should be able to ignore all of the problems.
command_like(
- [ 'pg_verifybackup', '-i', 'PG_VERSION', '-i', 'pg_xact', $backup_path ],
+ [
+ 'pg_verifybackup',
+ '--ignore' => 'PG_VERSION',
+ '--ignore' => 'pg_xact',
+ $backup_path
+ ],
qr/backup successfully verified/,
- 'multiple -i options work');
+ 'multiple --ignore options work');
-# Verify that when -i is not used, both problems are reported.
+# Verify that when --ignore is not used, both problems are reported.
$result = IPC::Run::run [ 'pg_verifybackup', $backup_path ],
'>', \$stdout, '2>', \$stderr;
ok(!$result, "multiple problems: fails");
qr/checksum mismatch for file \"PG_VERSION\"/,
"multiple problems: checksum mismatch reported");
-# Verify that when -e is used, only the problem detected first is reported.
-$result = IPC::Run::run [ 'pg_verifybackup', '-e', $backup_path ],
- '>', \$stdout, '2>', \$stderr;
-ok(!$result, "-e reports 1 error: fails");
+# Verify that when --exit-on-error is used, only the problem detected
+# first is reported.
+$result =
+ IPC::Run::run [ 'pg_verifybackup', '--exit-on-error', $backup_path ],
+ '>' => \$stdout,
+ '2>' => \$stderr;
+ok(!$result, "--exit-on-error reports 1 error: fails");
like(
$stderr,
qr/pg_xact.*is present in the manifest but not on disk/,
- "-e reports 1 error: missing files reported");
+ "--exit-on-error reports 1 error: missing files reported");
unlike(
$stderr,
qr/checksum mismatch for file \"PG_VERSION\"/,
- "-e reports 1 error: checksum mismatch not reported");
+ "--exit-on-error reports 1 error: checksum mismatch not reported");
# Test valid manifest with nonexistent backup directory.
command_fails_like(
[
- 'pg_verifybackup', '-m',
- "$backup_path/backup_manifest", "$backup_path/fake"
+ 'pg_verifybackup',
+ '--manifest-path' => "$backup_path/backup_manifest",
+ "$backup_path/fake"
],
qr/could not open directory/,
'nonexistent backup directory');
my $backup_path = $primary->backup_dir . '/test_encoding';
$primary->command_ok(
[
- 'pg_basebackup', '-D',
- $backup_path, '--no-sync',
- '-cfast', '--manifest-force-encode'
+ 'pg_basebackup',
+ '--pgdata' => $backup_path,
+ '--no-sync',
+ '--checkpoint' => 'fast',
+ '--manifest-force-encode',
],
"backup ok with forced hex encoding");
'>', 100, "many paths are encoded in the manifest");
command_like(
- [ 'pg_verifybackup', '-s', $backup_path ],
+ [ 'pg_verifybackup', '--skip-checksums', $backup_path ],
qr/backup successfully verified/,
'backup with forced encoding verified');
$primary->start;
my $backup_path = $primary->backup_dir . '/test_wal';
$primary->command_ok(
- [ 'pg_basebackup', '-D', $backup_path, '--no-sync', '-cfast' ],
+ [
+ 'pg_basebackup',
+ '--pgdata' => $backup_path,
+ '--no-sync',
+ '--checkpoint' => 'fast'
+ ],
"base backup ok");
# Rename pg_wal.
'missing pg_wal causes failure');
# Should work if we skip WAL verification.
-command_ok(
- [ 'pg_verifybackup', '-n', $backup_path ],
+command_ok([ 'pg_verifybackup', '--no-parse-wal', $backup_path ],
'missing pg_wal OK if not verifying WAL');
# Should also work if we specify the correct WAL location.
-command_ok([ 'pg_verifybackup', '-w', $relocated_pg_wal, $backup_path ],
- '-w can be used to specify WAL directory');
+command_ok(
+ [
+ 'pg_verifybackup',
+ '--wal-directory' => $relocated_pg_wal,
+ $backup_path
+ ],
+ '--wal-directory can be used to specify WAL directory');
# Move directory back to original location.
rename($relocated_pg_wal, $original_pg_wal) || die "rename pg_wal back: $!";
# The base backup run below does a checkpoint, that removes the first segment
# of the current timeline.
$primary->command_ok(
- [ 'pg_basebackup', '-D', $backup_path2, '--no-sync', '-cfast' ],
+ [
+ 'pg_basebackup',
+ '--pgdata' => $backup_path2,
+ '--no-sync',
+ '--checkpoint' => 'fast'
+ ],
"base backup 2 ok");
command_ok(
[ 'pg_verifybackup', $backup_path2 ],
"found expected backup files, compression $method");
# Verify tar backup.
- $primary->command_ok([ 'pg_verifybackup', '-n', '-e', $backup_path ],
+ $primary->command_ok(
+ [
+ 'pg_verifybackup', '--no-parse-wal',
+ '--exit-on-error', $backup_path,
+ ],
"verify backup, compression $method");
# Cleanup.
# invalid option arguments
command_fails_like(
- [ 'pg_waldump', '--block', 'bad' ],
+ [ 'pg_waldump', '--block' => 'bad' ],
qr/error: invalid block number/,
'invalid block number');
command_fails_like(
- [ 'pg_waldump', '--fork', 'bad' ],
+ [ 'pg_waldump', '--fork' => 'bad' ],
qr/error: invalid fork name/,
'invalid fork name');
command_fails_like(
- [ 'pg_waldump', '--limit', 'bad' ],
+ [ 'pg_waldump', '--limit' => 'bad' ],
qr/error: invalid value/,
'invalid limit');
command_fails_like(
- [ 'pg_waldump', '--relation', 'bad' ],
+ [ 'pg_waldump', '--relation' => 'bad' ],
qr/error: invalid relation/,
'invalid relation specification');
command_fails_like(
- [ 'pg_waldump', '--rmgr', 'bad' ],
+ [ 'pg_waldump', '--rmgr' => 'bad' ],
qr/error: resource manager .* does not exist/,
'invalid rmgr name');
command_fails_like(
- [ 'pg_waldump', '--start', 'bad' ],
+ [ 'pg_waldump', '--start' => 'bad' ],
qr/error: invalid WAL location/,
'invalid start LSN');
command_fails_like(
- [ 'pg_waldump', '--end', 'bad' ],
+ [ 'pg_waldump', '--end' => 'bad' ],
qr/error: invalid WAL location/,
'invalid end LSN');
qr/./,
'runs with start and end segment specified');
command_fails_like(
- [ 'pg_waldump', '-p', $node->data_dir ],
+ [ 'pg_waldump', '--path' => $node->data_dir ],
qr/error: no start WAL location given/,
'path option requires start location');
command_like(
[
- 'pg_waldump', '-p', $node->data_dir, '--start',
- $start_lsn, '--end', $end_lsn
+ 'pg_waldump',
+ '--path' => $node->data_dir,
+ '--start' => $start_lsn,
+ '--end' => $end_lsn,
],
qr/./,
'runs with path option and start and end locations');
command_fails_like(
- [ 'pg_waldump', '-p', $node->data_dir, '--start', $start_lsn ],
+ [
+ 'pg_waldump',
+ '--path' => $node->data_dir,
+ '--start' => $start_lsn,
+ ],
qr/error: error in WAL record at/,
'falling off the end of the WAL results in an error');
qr/^$/,
'no output with --quiet option');
command_fails_like(
- [ 'pg_waldump', '--quiet', '-p', $node->data_dir, '--start', $start_lsn ],
+ [
+ 'pg_waldump', '--quiet',
+ '--path' => $node->data_dir,
+ '--start' => $start_lsn
+ ],
qr/error: error in WAL record at/,
'errors are shown with --quiet');
my (@cmd, $stdout, $stderr, $result);
@cmd = (
- 'pg_waldump', '--start', $new_start,
+ 'pg_waldump',
+ '--start' => $new_start,
$node->data_dir . '/pg_wal/' . $start_walfile);
$result = IPC::Run::run \@cmd, '>', \$stdout, '2>', \$stderr;
ok($result, "runs with start segment and start LSN specified");
my (@cmd, $stdout, $stderr, $result, @lines);
@cmd = (
- 'pg_waldump', '-p', $node->data_dir, '--start', $start_lsn, '--end',
- $end_lsn);
+ 'pg_waldump',
+ '--path' => $node->data_dir,
+ '--start' => $start_lsn,
+ '--end' => $end_lsn);
push @cmd, @opts;
$result = IPC::Run::run \@cmd, '>', \$stdout, '2>', \$stderr;
ok($result, "pg_waldump @opts: runs ok");
@lines = test_pg_waldump;
is(grep(!/^rmgr: \w/, @lines), 0, 'all output lines are rmgr lines');
-@lines = test_pg_waldump('--limit', 6);
+@lines = test_pg_waldump('--limit' => 6);
is(@lines, 6, 'limit option observed');
@lines = test_pg_waldump('--fullpage');
like($lines[0], qr/WAL statistics/, "statistics on stdout");
is(grep(/^rmgr:/, @lines), 0, 'no rmgr lines output');
-@lines = test_pg_waldump('--rmgr', 'Btree');
+@lines = test_pg_waldump('--rmgr' => 'Btree');
is(grep(!/^rmgr: Btree/, @lines), 0, 'only Btree lines');
-@lines = test_pg_waldump('--fork', 'init');
+@lines = test_pg_waldump('--fork' => 'init');
is(grep(!/fork init/, @lines), 0, 'only init fork lines');
-@lines = test_pg_waldump('--relation',
- "$default_ts_oid/$postgres_db_oid/$rel_t1_oid");
+@lines = test_pg_waldump(
+ '--relation' => "$default_ts_oid/$postgres_db_oid/$rel_t1_oid");
is(grep(!/rel $default_ts_oid\/$postgres_db_oid\/$rel_t1_oid/, @lines),
0, 'only lines for selected relation');
-@lines =
- test_pg_waldump('--relation',
- "$default_ts_oid/$postgres_db_oid/$rel_i1a_oid",
- '--block', 1);
+@lines = test_pg_waldump(
+ '--relation' => "$default_ts_oid/$postgres_db_oid/$rel_i1a_oid",
+ '--block' => 1);
is(grep(!/\bblk 1\b/, @lines), 0, 'only lines for selected block');
$node->command_ok(
[
- 'pg_waldump', '--quiet',
- '--save-fullpage', "$tmp_folder/raw",
- '--relation', $relation,
+ 'pg_waldump',
+ '--quiet',
+ '--save-fullpage' => "$tmp_folder/raw",
+ '--relation' => $relation,
$walfile
],
'pg_waldump with --save-fullpage runs');
{
my ($stderr);
- run_log([ 'pgbench', '-j', '2', '--bad-option' ], '2>', \$stderr);
+ run_log([ 'pgbench', '--jobs' => '2', '--bad-option' ], '2>', \$stderr);
$nthreads = 1 if $stderr =~ m/threads are not supported on this platform/;
}
# Tests with ON_ERROR_STOP.
$node->command_ok(
[
- 'psql', '-X',
- '--single-transaction', '-v',
- 'ON_ERROR_STOP=1', '-c',
- 'INSERT INTO tab_psql_single VALUES (1)', '-c',
- 'INSERT INTO tab_psql_single VALUES (2)'
+ 'psql',
+ '--no-psqlrc',
+ '--single-transaction',
+ '--set' => 'ON_ERROR_STOP=1',
+ '--command' => 'INSERT INTO tab_psql_single VALUES (1)',
+ '--command' => 'INSERT INTO tab_psql_single VALUES (2)',
],
'ON_ERROR_STOP, --single-transaction and multiple -c switches');
my $row_count =
$node->command_fails(
[
- 'psql', '-X',
- '--single-transaction', '-v',
- 'ON_ERROR_STOP=1', '-c',
- 'INSERT INTO tab_psql_single VALUES (3)', '-c',
- "\\copy tab_psql_single FROM '$tempdir/nonexistent'"
+ 'psql',
+ '--no-psqlrc',
+ '--single-transaction',
+ '--set' => 'ON_ERROR_STOP=1',
+ '--command' => 'INSERT INTO tab_psql_single VALUES (3)',
+ '--command' => "\\copy tab_psql_single FROM '$tempdir/nonexistent'"
],
'ON_ERROR_STOP, --single-transaction and multiple -c switches, error');
$row_count =
append_to_file($insert_sql_file, 'INSERT INTO tab_psql_single VALUES (4);');
$node->command_ok(
[
- 'psql', '-X', '--single-transaction', '-v',
- 'ON_ERROR_STOP=1', '-f', $insert_sql_file, '-f',
- $insert_sql_file
+ 'psql',
+ '--no-psqlrc',
+ '--single-transaction',
+ '--set' => 'ON_ERROR_STOP=1',
+ '--file' => $insert_sql_file,
+ '--file' => $insert_sql_file
],
'ON_ERROR_STOP, --single-transaction and multiple -f switches');
$row_count =
$node->command_fails(
[
- 'psql', '-X', '--single-transaction', '-v',
- 'ON_ERROR_STOP=1', '-f', $insert_sql_file, '-f',
- $copy_sql_file
+ 'psql',
+ '--no-psqlrc',
+ '--single-transaction',
+ '--set' => 'ON_ERROR_STOP=1',
+ '--file' => $insert_sql_file,
+ '--file' => $copy_sql_file
],
'ON_ERROR_STOP, --single-transaction and multiple -f switches, error');
$row_count =
# transaction commits.
$node->command_fails(
[
- 'psql', '-X',
- '--single-transaction', '-f',
- $insert_sql_file, '-f',
- $insert_sql_file, '-c',
- "\\copy tab_psql_single FROM '$tempdir/nonexistent'"
+ 'psql',
+ '--no-psqlrc',
+ '--single-transaction',
+ '--file' => $insert_sql_file,
+ '--file' => $insert_sql_file,
+ '--command' => "\\copy tab_psql_single FROM '$tempdir/nonexistent'"
],
'no ON_ERROR_STOP, --single-transaction and multiple -f/-c switches');
$row_count =
# returns a success and the transaction commits.
$node->command_ok(
[
- 'psql', '-X', '--single-transaction', '-f',
- $insert_sql_file, '-f', $insert_sql_file, '-f',
- $copy_sql_file
+ 'psql',
+ '--no-psqlrc',
+ '--single-transaction',
+ '--file' => $insert_sql_file,
+ '--file' => $insert_sql_file,
+ '--file' => $copy_sql_file
],
'no ON_ERROR_STOP, --single-transaction and multiple -f switches');
$row_count =
# the transaction commit even if there is a failure in-between.
$node->command_ok(
[
- 'psql', '-X',
- '--single-transaction', '-c',
- 'INSERT INTO tab_psql_single VALUES (5)', '-f',
- $copy_sql_file, '-c',
- 'INSERT INTO tab_psql_single VALUES (6)'
+ 'psql',
+ '--no-psqlrc',
+ '--single-transaction',
+ '--command' => 'INSERT INTO tab_psql_single VALUES (5)',
+ '--file' => $copy_sql_file,
+ '--command' => 'INSERT INTO tab_psql_single VALUES (6)'
],
'no ON_ERROR_STOP, --single-transaction and multiple -c switches');
$row_count =
qr/statement: CLUSTER;/,
'SQL CLUSTER run');
-$node->command_fails([ 'clusterdb', '-t', 'nonexistent' ],
+$node->command_fails([ 'clusterdb', '--table' => 'nonexistent' ],
'fails with nonexistent table');
$node->safe_psql('postgres',
'CREATE TABLE test1 (a int); CREATE INDEX test1x ON test1 (a); CLUSTER test1 USING test1x'
);
$node->issues_sql_like(
- [ 'clusterdb', '-t', 'test1' ],
+ [ 'clusterdb', '--table' => 'test1' ],
qr/statement: CLUSTER public\.test1;/,
'cluster specific table');
# clusterdb -a is not compatible with -d. This relies on PGDATABASE to be
# set, something PostgreSQL::Test::Cluster does.
$node->issues_sql_like(
- [ 'clusterdb', '-a' ],
+ [ 'clusterdb', '--all' ],
qr/statement: CLUSTER.*statement: CLUSTER/s,
'cluster all databases');
CREATE DATABASE regression_invalid;
UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid';
));
-$node->command_ok([ 'clusterdb', '-a' ],
+$node->command_ok([ 'clusterdb', '--all' ],
'invalid database not targeted by clusterdb -a');
# Doesn't quite belong here, but don't want to waste time by creating an
# invalid database in 010_clusterdb.pl as well.
$node->command_fails_like(
- [ 'clusterdb', '-d', 'regression_invalid' ],
+ [ 'clusterdb', '--dbname' => 'regression_invalid' ],
qr/FATAL: cannot connect to invalid database "regression_invalid"/,
'clusterdb cannot target invalid database');
'CREATE TABLE test1 (a int); CREATE INDEX test1x ON test1 (a); CLUSTER test1 USING test1x'
);
$node->issues_sql_like(
- [ 'clusterdb', '-a', '-t', 'test1' ],
+ [ 'clusterdb', '--all', '--table' => 'test1' ],
qr/statement: CLUSTER public\.test1/s,
'cluster specific table in all databases');
qr/statement: CREATE DATABASE foobar1/,
'SQL CREATE DATABASE run');
$node->issues_sql_like(
- [ 'createdb', '-l', 'C', '-E', 'LATIN1', '-T', 'template0', 'foobar2' ],
+ [
+ 'createdb',
+ '--locale' => 'C',
+ '--encoding' => 'LATIN1',
+ '--template' => 'template0',
+ 'foobar2',
+ ],
qr/statement: CREATE DATABASE foobar2 ENCODING 'LATIN1'/,
'create database with encoding');
# provider. XXX Maybe split into multiple tests?
$node->command_fails(
[
- 'createdb', '-T', 'template0', '-E', 'UTF8',
- '--locale-provider=icu', 'foobar4'
+ 'createdb',
+ '--template' => 'template0',
+ '--encoding' => 'UTF8',
+ '--locale-provider' => 'icu',
+ 'foobar4',
],
'create database with ICU fails without ICU locale specified');
$node->issues_sql_like(
[
- 'createdb', '-T',
- 'template0', '-E',
- 'UTF8', '--locale-provider=icu',
- '--locale=C', '--icu-locale=en',
- 'foobar5'
+ 'createdb',
+ '--template' => 'template0',
+ '--encoding' => 'UTF8',
+ '--locale-provider' => 'icu',
+ '--locale' => 'C',
+ '--icu-locale' => 'en',
+ 'foobar5',
],
qr/statement: CREATE DATABASE foobar5 .* LOCALE_PROVIDER icu ICU_LOCALE 'en'/,
'create database with ICU locale specified');
$node->command_fails(
[
- 'createdb', '-T', 'template0', '-E', 'UTF8',
- '--locale-provider=icu',
- '--icu-locale=@colNumeric=lower', 'foobarX'
+ 'createdb',
+ '--template' => 'template0',
+ '--encoding' => 'UTF8',
+ '--locale-provider' => 'icu',
+ '--icu-locale' => '@colNumeric=lower',
+ 'foobarX',
],
'fails for invalid ICU locale');
$node->command_fails_like(
[
- 'createdb', '-T',
- 'template0', '--locale-provider=icu',
- '--encoding=SQL_ASCII', 'foobarX'
+ 'createdb',
+ '--template' => 'template0',
+ '--locale-provider' => 'icu',
+ '--encoding' => 'SQL_ASCII',
+ 'foobarX',
],
qr/ERROR: encoding "SQL_ASCII" is not supported with ICU provider/,
'fails for encoding not supported by ICU');
$node2->command_ok(
[
- 'createdb', '-T',
- 'template0', '--locale-provider=libc',
- 'foobar55'
+ 'createdb',
+ '--template' => 'template0',
+ '--locale-provider' => 'libc',
+ 'foobar55',
],
'create database with libc provider from template database with icu provider'
);
$node2->command_ok(
[
- 'createdb', '-T', 'template0', '--icu-locale', 'en-US',
- 'foobar56'
+ 'createdb',
+ '--template' => 'template0',
+ '--icu-locale' => 'en-US',
+ 'foobar56',
],
'create database with icu locale from template database with icu provider'
);
$node2->command_ok(
[
- 'createdb', '-T',
- 'template0', '--locale-provider',
- 'icu', '--locale',
- 'en', '--lc-collate',
- 'C', '--lc-ctype',
- 'C', 'foobar57'
+ 'createdb',
+ '--template' => 'template0',
+ '--locale-provider' => 'icu',
+ '--locale' => 'en',
+ '--lc-collate' => 'C',
+ '--lc-ctype' => 'C',
+ 'foobar57',
],
'create database with locale as ICU locale');
}
else
{
$node->command_fails(
- [ 'createdb', '-T', 'template0', '--locale-provider=icu', 'foobar4' ],
+ [
+ 'createdb',
+ '--template' => 'template0',
+ '--locale-provider' => 'icu',
+ 'foobar4',
+ ],
'create database with ICU fails since no ICU support');
}
$node->command_fails(
[
- 'createdb', '-T',
- 'template0', '--locale-provider=builtin',
- 'tbuiltin1'
+ 'createdb',
+ '--template' => 'template0',
+ '--locale-provider' => 'builtin',
+ 'tbuiltin1',
],
'create database with provider "builtin" fails without --locale');
$node->command_ok(
[
- 'createdb', '-T',
- 'template0', '--locale-provider=builtin',
- '--locale=C', 'tbuiltin2'
+ 'createdb',
+ '--template' => 'template0',
+ '--locale-provider' => 'builtin',
+ '--locale' => 'C',
+ 'tbuiltin2',
],
'create database with provider "builtin" and locale "C"');
$node->command_ok(
[
- 'createdb', '-T',
- 'template0', '--locale-provider=builtin',
- '--locale=C', '--lc-collate=C',
- 'tbuiltin3'
+ 'createdb',
+ '--template' => 'template0',
+ '--locale-provider' => 'builtin',
+ '--locale' => 'C',
+ '--lc-collate' => 'C',
+ 'tbuiltin3',
],
'create database with provider "builtin" and LC_COLLATE=C');
$node->command_ok(
[
- 'createdb', '-T',
- 'template0', '--locale-provider=builtin',
- '--locale=C', '--lc-ctype=C',
- 'tbuiltin4'
+ 'createdb',
+ '--template' => 'template0',
+ '--locale-provider' => 'builtin',
+ '--locale' => 'C',
+ '--lc-ctype' => 'C',
+ 'tbuiltin4',
],
'create database with provider "builtin" and LC_CTYPE=C');
$node->command_ok(
[
- 'createdb', '-T',
- 'template0', '--locale-provider=builtin',
- '--lc-collate=C', '--lc-ctype=C',
- '-E UTF-8', '--builtin-locale=C.UTF8',
- 'tbuiltin5'
+ 'createdb',
+ '--template' => 'template0',
+ '--locale-provider' => 'builtin',
+ '--lc-collate' => 'C',
+ '--lc-ctype' => 'C',
+ '--encoding' => 'UTF-8',
+ '--builtin-locale' => 'C.UTF8',
+ 'tbuiltin5',
],
'create database with --builtin-locale C.UTF-8 and -E UTF-8');
$node->command_fails(
[
- 'createdb', '-T',
- 'template0', '--locale-provider=builtin',
- '--lc-collate=C', '--lc-ctype=C',
- '-E LATIN1', '--builtin-locale=C.UTF-8',
- 'tbuiltin6'
+ 'createdb',
+ '--template' => 'template0',
+ '--locale-provider' => 'builtin',
+ '--lc-collate' => 'C',
+ '--lc-ctype' => 'C',
+ '--encoding' => 'LATIN1',
+ '--builtin-locale' => 'C.UTF-8',
+ 'tbuiltin6',
],
'create database with --builtin-locale C.UTF-8 and -E LATIN1');
$node->command_fails(
[
- 'createdb', '-T',
- 'template0', '--locale-provider=builtin',
- '--locale=C', '--icu-locale=en',
- 'tbuiltin7'
+ 'createdb',
+ '--template' => 'template0',
+ '--locale-provider' => 'builtin',
+ '--locale' => 'C',
+ '--icu-locale' => 'en',
+ 'tbuiltin7',
],
'create database with provider "builtin" and ICU_LOCALE="en"');
$node->command_fails(
[
- 'createdb', '-T',
- 'template0', '--locale-provider=builtin',
- '--locale=C', '--icu-rules=""',
- 'tbuiltin8'
+ 'createdb',
+ '--template' => 'template0',
+ '--locale-provider' => 'builtin',
+ '--locale' => 'C',
+ '--icu-rules' => '""',
+ 'tbuiltin8',
],
'create database with provider "builtin" and ICU_RULES=""');
$node->command_fails(
[
- 'createdb', '-T',
- 'template1', '--locale-provider=builtin',
- '--locale=C', 'tbuiltin9'
+ 'createdb',
+ '--template' => 'template1',
+ '--locale-provider' => 'builtin',
+ '--locale' => 'C',
+ 'tbuiltin9',
],
'create database with provider "builtin" not matching template');
'fails if database already exists');
$node->command_fails(
- [ 'createdb', '-T', 'template0', '--locale-provider=xyz', 'foobarX' ],
+ [
+ 'createdb',
+ '--template' => 'template0',
+ '--locale-provider' => 'xyz',
+ 'foobarX',
+ ],
'fails for invalid locale provider');
# Check use of templates with shared dependencies copied from the template.
ALTER TABLE tab_foobar owner to role_foobar;
CREATE POLICY pol_foobar ON tab_foobar FOR ALL TO role_foobar;');
$node->issues_sql_like(
- [ 'createdb', '-l', 'C', '-T', 'foobar2', 'foobar3' ],
+ [ 'createdb', '--locale' => 'C', '--template' => 'foobar2', 'foobar3' ],
qr/statement: CREATE DATABASE foobar3 TEMPLATE foobar2 LOCALE 'C'/,
'create database with template');
($ret, $stdout, $stderr) = $node->psql(
1,
[qr/^$/],
[
- qr/^createdb: error: database creation failed: ERROR: invalid LC_COLLATE locale name|^createdb: error: database creation failed: ERROR: new collation \(foo'; SELECT '1\) is incompatible with the collation of the template database/s
+ qr/^createdb: error: database creation failed: ERROR: invalid LC_COLLATE locale name|^createdb: error: database creation failed: ERROR: new collation \(foo'; SELECT '1\) is incompatible with the collation of the template database/s,
],
'createdb with incorrect --lc-collate');
$node->command_checks_all(
1,
[qr/^$/],
[
- qr/^createdb: error: database creation failed: ERROR: invalid LC_CTYPE locale name|^createdb: error: database creation failed: ERROR: new LC_CTYPE \(foo'; SELECT '1\) is incompatible with the LC_CTYPE of the template database/s
+ qr/^createdb: error: database creation failed: ERROR: invalid LC_CTYPE locale name|^createdb: error: database creation failed: ERROR: new LC_CTYPE \(foo'; SELECT '1\) is incompatible with the LC_CTYPE of the template database/s,
],
'createdb with incorrect --lc-ctype');
1,
[qr/^$/],
[
- qr/^createdb: error: database creation failed: ERROR: invalid create database strategy "foo"/s
+ qr/^createdb: error: database creation failed: ERROR: invalid create database strategy "foo"/s,
],
'createdb with incorrect --strategy');
# Check database creation strategy
$node->issues_sql_like(
- [ 'createdb', '-T', 'foobar2', '-S', 'wal_log', 'foobar6' ],
+ [
+ 'createdb',
+ '--template' => 'foobar2',
+ '--strategy' => 'wal_log',
+ 'foobar6',
+ ],
qr/statement: CREATE DATABASE foobar6 STRATEGY wal_log TEMPLATE foobar2/,
'create database with WAL_LOG strategy');
$node->issues_sql_like(
- [ 'createdb', '-T', 'foobar2', '-S', 'WAL_LOG', 'foobar6s' ],
+ [
+ 'createdb',
+ '--template' => 'foobar2',
+ '--strategy' => 'WAL_LOG',
+ 'foobar6s',
+ ],
qr/statement: CREATE DATABASE foobar6s STRATEGY "WAL_LOG" TEMPLATE foobar2/,
'create database with WAL_LOG strategy');
$node->issues_sql_like(
- [ 'createdb', '-T', 'foobar2', '-S', 'file_copy', 'foobar7' ],
+ [
+ 'createdb',
+ '--template' => 'foobar2',
+ '--strategy' => 'file_copy',
+ 'foobar7',
+ ],
qr/statement: CREATE DATABASE foobar7 STRATEGY file_copy TEMPLATE foobar2/,
'create database with FILE_COPY strategy');
$node->issues_sql_like(
- [ 'createdb', '-T', 'foobar2', '-S', 'FILE_COPY', 'foobar7s' ],
+ [
+ 'createdb',
+ '--template' => 'foobar2',
+ '--strategy' => 'FILE_COPY',
+ 'foobar7s',
+ ],
qr/statement: CREATE DATABASE foobar7s STRATEGY "FILE_COPY" TEMPLATE foobar2/,
'create database with FILE_COPY strategy');
# Create database owned by role_foobar.
$node->issues_sql_like(
- [ 'createdb', '-T', 'foobar2', '-O', 'role_foobar', 'foobar8' ],
+ [
+ 'createdb',
+ '--template' => 'foobar2',
+ '--owner' => 'role_foobar',
+ 'foobar8',
+ ],
qr/statement: CREATE DATABASE foobar8 OWNER role_foobar TEMPLATE foobar2/,
'create database with owner role_foobar');
($ret, $stdout, $stderr) =
qr/statement: CREATE ROLE regress_user1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS;/,
'SQL CREATE USER run');
$node->issues_sql_like(
- [ 'createuser', '-L', 'regress_role1' ],
+ [ 'createuser', '--no-login', 'regress_role1' ],
qr/statement: CREATE ROLE regress_role1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS;/,
'create a non-login role');
$node->issues_sql_like(
- [ 'createuser', '-r', 'regress user2' ],
+ [ 'createuser', '--createrole', 'regress user2' ],
qr/statement: CREATE ROLE "regress user2" NOSUPERUSER NOCREATEDB CREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS;/,
'create a CREATEROLE user');
$node->issues_sql_like(
- [ 'createuser', '-s', 'regress_user3' ],
+ [ 'createuser', '--superuser', 'regress_user3' ],
qr/statement: CREATE ROLE regress_user3 SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS;/,
'create a superuser');
$node->issues_sql_like(
[
- 'createuser', '-a',
- 'regress_user1', '-a',
- 'regress user2', 'regress user #4'
+ 'createuser',
+ '--with-admin' => 'regress_user1',
+ '--with-admin' => 'regress user2',
+ 'regress user #4'
],
qr/statement: CREATE ROLE "regress user #4" NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS ADMIN regress_user1,"regress user2";/,
'add a role as a member with admin option of the newly created role');
$node->issues_sql_like(
[
- 'createuser', 'REGRESS_USER5', '-m', 'regress_user3',
- '-m', 'regress user #4'
+ 'createuser',
+ 'REGRESS_USER5',
+ '--with-member' => 'regress_user3',
+ '--with-member' => 'regress user #4'
],
qr/statement: CREATE ROLE "REGRESS_USER5" NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS ROLE regress_user3,"regress user #4";/,
'add a role as a member of the newly created role');
$node->issues_sql_like(
- [ 'createuser', '-v', '2029 12 31', 'regress_user6' ],
+ [ 'createuser', '--valid-until' => '2029 12 31', 'regress_user6' ],
qr/statement: CREATE ROLE regress_user6 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS VALID UNTIL \'2029 12 31\';/,
'create a role with a password expiration date');
$node->issues_sql_like(
qr/statement: CREATE ROLE regress_user8 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS;/,
'create a role without BYPASSRLS');
$node->issues_sql_like(
- [ 'createuser', '--with-admin', 'regress_user1', 'regress_user9' ],
+ [ 'createuser', '--with-admin' => 'regress_user1', 'regress_user9' ],
qr/statement: CREATE ROLE regress_user9 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS ADMIN regress_user1;/,
'--with-admin');
$node->issues_sql_like(
- [ 'createuser', '--with-member', 'regress_user1', 'regress_user10' ],
+ [ 'createuser', '--with-member' => 'regress_user1', 'regress_user10' ],
qr/statement: CREATE ROLE regress_user10 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS ROLE regress_user1;/,
'--with-member');
$node->issues_sql_like(
- [ 'createuser', '--role', 'regress_user1', 'regress_user11' ],
+ [ 'createuser', '--role' => 'regress_user1', 'regress_user11' ],
qr/statement: CREATE ROLE regress_user11 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS IN ROLE regress_user1;/,
'--role');
$node->issues_sql_like(
- [ 'createuser', 'regress_user12', '--member-of', 'regress_user1' ],
+ [ 'createuser', 'regress_user12', '--member-of' => 'regress_user1' ],
qr/statement: CREATE ROLE regress_user12 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS IN ROLE regress_user1;/,
'--member-of');
$node->command_fails([ 'createuser', 'regress_user1' ],
'fails if role already exists');
$node->command_fails(
- [ 'createuser', 'regress_user1', '-m', 'regress_user2', 'regress_user3' ],
+ [
+ 'createuser',
+ 'regress_user1',
+ '--with-member' => 'regress_user2',
+ 'regress_user3'
+ ],
'fails for too many non-options');
done_testing();
$node->start;
$node->command_ok(
- [ 'pg_isready', "--timeout=$PostgreSQL::Test::Utils::timeout_default" ],
+ [
+ 'pg_isready',
+ '--timeout' => $PostgreSQL::Test::Utils::timeout_default,
+ ],
'succeeds with server running');
done_testing();
$node->safe_psql('postgres',
"TRUNCATE index_relfilenodes; $save_relfilenodes");
$node->issues_sql_like(
- [ 'reindexdb', '-s', 'postgres' ],
+ [ 'reindexdb', '--system', 'postgres' ],
qr/statement: REINDEX SYSTEM postgres;/,
'reindex system tables');
$relnode_info = $node->safe_psql('postgres', $compare_relfilenodes);
'relfilenode change after REINDEX SYSTEM');
$node->issues_sql_like(
- [ 'reindexdb', '-t', 'test1', 'postgres' ],
+ [ 'reindexdb', '--table' => 'test1', 'postgres' ],
qr/statement: REINDEX TABLE public\.test1;/,
'reindex specific table');
$node->issues_sql_like(
- [ 'reindexdb', '-t', 'test1', '--tablespace', $tbspace_name, 'postgres' ],
+ [
+ 'reindexdb',
+ '--table' => 'test1',
+ '--tablespace' => $tbspace_name,
+ 'postgres',
+ ],
qr/statement: REINDEX \(TABLESPACE $tbspace_name\) TABLE public\.test1;/,
'reindex specific table on tablespace');
$node->issues_sql_like(
- [ 'reindexdb', '-i', 'test1x', 'postgres' ],
+ [ 'reindexdb', '--index' => 'test1x', 'postgres' ],
qr/statement: REINDEX INDEX public\.test1x;/,
'reindex specific index');
$node->issues_sql_like(
- [ 'reindexdb', '-S', 'pg_catalog', 'postgres' ],
+ [ 'reindexdb', '--schema' => 'pg_catalog', 'postgres' ],
qr/statement: REINDEX SCHEMA pg_catalog;/,
'reindex specific schema');
$node->issues_sql_like(
- [ 'reindexdb', '-v', '-t', 'test1', 'postgres' ],
+ [ 'reindexdb', '--verbose', '--table' => 'test1', 'postgres' ],
qr/statement: REINDEX \(VERBOSE\) TABLE public\.test1;/,
'reindex with verbose output');
$node->issues_sql_like(
[
- 'reindexdb', '-v', '-t', 'test1',
- '--tablespace', $tbspace_name, 'postgres'
+ 'reindexdb',
+ '--verbose',
+ '--table' => 'test1',
+ '--tablespace' => $tbspace_name,
+ 'postgres',
],
qr/statement: REINDEX \(VERBOSE, TABLESPACE $tbspace_name\) TABLE public\.test1;/,
'reindex with verbose output and tablespace');
'OID change after REINDEX DATABASE CONCURRENTLY');
$node->issues_sql_like(
- [ 'reindexdb', '--concurrently', '-t', 'test1', 'postgres' ],
+ [ 'reindexdb', '--concurrently', '--table' => 'test1', 'postgres' ],
qr/statement: REINDEX TABLE CONCURRENTLY public\.test1;/,
'reindex specific table concurrently');
$node->issues_sql_like(
- [ 'reindexdb', '--concurrently', '-i', 'test1x', 'postgres' ],
+ [ 'reindexdb', '--concurrently', '--index' => 'test1x', 'postgres' ],
qr/statement: REINDEX INDEX CONCURRENTLY public\.test1x;/,
'reindex specific index concurrently');
$node->issues_sql_like(
- [ 'reindexdb', '--concurrently', '-S', 'public', 'postgres' ],
+ [ 'reindexdb', '--concurrently', '--schema' => 'public', 'postgres' ],
qr/statement: REINDEX SCHEMA CONCURRENTLY public;/,
'reindex specific schema concurrently');
-$node->command_fails([ 'reindexdb', '--concurrently', '-s', 'postgres' ],
+$node->command_fails(
+ [ 'reindexdb', '--concurrently', '--system', 'postgres' ],
'reindex system tables concurrently');
$node->issues_sql_like(
- [ 'reindexdb', '--concurrently', '-v', '-t', 'test1', 'postgres' ],
+ [
+ 'reindexdb', '--concurrently', '--verbose',
+ '--table' => 'test1',
+ 'postgres',
+ ],
qr/statement: REINDEX \(VERBOSE\) TABLE CONCURRENTLY public\.test1;/,
'reindex with verbose output concurrently');
$node->issues_sql_like(
[
- 'reindexdb', '--concurrently', '-v', '-t',
- 'test1', '--tablespace', $tbspace_name, 'postgres'
+ 'reindexdb',
+ '--concurrently',
+ '--verbose',
+ '--table' => 'test1',
+ '--tablespace' => $tbspace_name,
+ 'postgres',
],
qr/statement: REINDEX \(VERBOSE, TABLESPACE $tbspace_name\) TABLE CONCURRENTLY public\.test1;/,
'reindex concurrently with verbose output and tablespace');
# messages.
$node->command_checks_all(
[
- 'reindexdb', '-t', $toast_table, '--tablespace',
- $tbspace_name, 'postgres'
+ 'reindexdb',
+ '--table' => $toast_table,
+ '--tablespace' => $tbspace_name,
+ 'postgres',
],
1,
[],
'reindex toast table with tablespace');
$node->command_checks_all(
[
- 'reindexdb', '--concurrently', '-t', $toast_table,
- '--tablespace', $tbspace_name, 'postgres'
+ 'reindexdb',
+ '--concurrently',
+ '--table' => $toast_table,
+ '--tablespace' => $tbspace_name,
+ 'postgres',
],
1,
[],
'reindex toast table concurrently with tablespace');
$node->command_checks_all(
[
- 'reindexdb', '-i', $toast_index, '--tablespace',
- $tbspace_name, 'postgres'
+ 'reindexdb',
+ '--index' => $toast_index,
+ '--tablespace' => $tbspace_name,
+ 'postgres',
],
1,
[],
'reindex toast index with tablespace');
$node->command_checks_all(
[
- 'reindexdb', '--concurrently', '-i', $toast_index,
- '--tablespace', $tbspace_name, 'postgres'
+ 'reindexdb',
+ '--concurrently',
+ '--index' => $toast_index,
+ '--tablespace' => $tbspace_name,
+ 'postgres',
],
1,
[],
|);
$node->command_fails(
- [ 'reindexdb', '-j', '2', '-s', 'postgres' ],
+ [ 'reindexdb', '--jobs' => '2', '--system', 'postgres' ],
'parallel reindexdb cannot process system catalogs');
$node->command_ok(
- [ 'reindexdb', '-j', '2', '-i', 's1.i1', '-i', 's2.i2', 'postgres' ],
+ [
+ 'reindexdb',
+ '--jobs' => '2',
+ '--index' => 's1.i1',
+ '--index' => 's2.i2',
+ 'postgres',
+ ],
'parallel reindexdb for indices');
# Note that the ordering of the commands is not stable, so the second
# command for s2.t2 is not checked after.
$node->issues_sql_like(
- [ 'reindexdb', '-j', '2', '-S', 's1', '-S', 's2', 'postgres' ],
+ [
+ 'reindexdb',
+ '--jobs' => '2',
+ '--schema' => 's1',
+ '--schema' => 's2',
+ 'postgres',
+ ],
qr/statement:\ REINDEX TABLE s1.t1;/,
'parallel reindexdb for schemas does a per-table REINDEX');
-$node->command_ok(
- [ 'reindexdb', '-j', '2', '-S', 's3' ],
+$node->command_ok([ 'reindexdb', '--jobs' => '2', '--schema' => 's3' ],
'parallel reindexdb with empty schema');
$node->command_ok(
- [ 'reindexdb', '-j', '2', '--concurrently', '-d', 'postgres' ],
+ [
+ 'reindexdb',
+ '--jobs' => '2',
+ '--concurrently',
+ '--dbname' => 'postgres',
+ ],
'parallel reindexdb on database, concurrently');
# combinations of objects
$node->issues_sql_like(
- [ 'reindexdb', '-s', '-t', 'test1', 'postgres' ],
+ [ 'reindexdb', '--system', '--table' => 'test1', 'postgres' ],
qr/statement:\ REINDEX SYSTEM postgres;/,
'specify both --system and --table');
$node->issues_sql_like(
- [ 'reindexdb', '-s', '-i', 'test1x', 'postgres' ],
+ [ 'reindexdb', '--system', '--index' => 'test1x', 'postgres' ],
qr/statement:\ REINDEX INDEX public.test1x;/,
'specify both --system and --index');
$node->issues_sql_like(
- [ 'reindexdb', '-s', '-S', 'pg_catalog', 'postgres' ],
+ [ 'reindexdb', '--system', '--schema' => 'pg_catalog', 'postgres' ],
qr/statement:\ REINDEX SCHEMA pg_catalog;/,
'specify both --system and --schema');
$node->safe_psql('template1',
'CREATE TABLE test1 (a int); CREATE INDEX test1x ON test1 (a);');
$node->issues_sql_like(
- [ 'reindexdb', '-a' ],
+ [ 'reindexdb', '--all' ],
qr/statement: REINDEX.*statement: REINDEX/s,
'reindex all databases');
$node->issues_sql_like(
- [ 'reindexdb', '-a', '-s' ],
+ [ 'reindexdb', '--all', '--system' ],
qr/statement: REINDEX SYSTEM postgres/s,
'reindex system catalogs in all databases');
$node->issues_sql_like(
- [ 'reindexdb', '-a', '-S', 'public' ],
+ [ 'reindexdb', '--all', '--schema' => 'public' ],
qr/statement: REINDEX SCHEMA public/s,
'reindex schema in all databases');
$node->issues_sql_like(
- [ 'reindexdb', '-a', '-i', 'test1x' ],
+ [ 'reindexdb', '--all', '--index' => 'test1x' ],
qr/statement: REINDEX INDEX public\.test1x/s,
'reindex index in all databases');
$node->issues_sql_like(
- [ 'reindexdb', '-a', '-t', 'test1' ],
+ [ 'reindexdb', '--all', '--table' => 'test1' ],
qr/statement: REINDEX TABLE public\.test1/s,
'reindex table in all databases');
CREATE DATABASE regression_invalid;
UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid';
));
-$node->command_ok([ 'reindexdb', '-a' ],
- 'invalid database not targeted by reindexdb -a');
+$node->command_ok([ 'reindexdb', '--all' ],
+ 'invalid database not targeted by reindexdb --all');
# Doesn't quite belong here, but don't want to waste time by creating an
# invalid database in 090_reindexdb.pl as well.
$node->command_fails_like(
- [ 'reindexdb', '-d', 'regression_invalid' ],
+ [ 'reindexdb', '--dbname' => 'regression_invalid' ],
qr/FATAL: cannot connect to invalid database "regression_invalid"/,
'reindexdb cannot target invalid database');
[ 'vacuumdb', '--analyze-only', '--no-process-toast', 'postgres' ],
'--analyze-only and --no-process-toast specified together');
$node->issues_sql_like(
- [ 'vacuumdb', '-P', 2, 'postgres' ],
+ [ 'vacuumdb', '--parallel' => 2, 'postgres' ],
qr/statement: VACUUM \(SKIP_DATABASE_STATS, PARALLEL 2\).*;/,
'vacuumdb -P 2');
$node->issues_sql_like(
- [ 'vacuumdb', '-P', 0, 'postgres' ],
+ [ 'vacuumdb', '--parallel' => 0, 'postgres' ],
qr/statement: VACUUM \(SKIP_DATABASE_STATS, PARALLEL 0\).*;/,
'vacuumdb -P 0');
$node->command_ok([qw(vacuumdb -Z --table=pg_am dbname=template1)],
'column list');
$node->command_fails(
- [ 'vacuumdb', '--analyze', '--table', 'vactable(c)', 'postgres' ],
+ [ 'vacuumdb', '--analyze', '--table' => 'vactable(c)', 'postgres' ],
'incorrect column name with ANALYZE');
-$node->command_fails([ 'vacuumdb', '-P', -1, 'postgres' ],
+$node->command_fails([ 'vacuumdb', '--parallel' => -1, 'postgres' ],
'negative parallel degree');
$node->issues_sql_like(
- [ 'vacuumdb', '--analyze', '--table', 'vactable(a, b)', 'postgres' ],
+ [ 'vacuumdb', '--analyze', '--table' => 'vactable(a, b)', 'postgres' ],
qr/statement: VACUUM \(SKIP_DATABASE_STATS, ANALYZE\) public.vactable\(a, b\);/,
'vacuumdb --analyze with complete column list');
$node->issues_sql_like(
- [ 'vacuumdb', '--analyze-only', '--table', 'vactable(b)', 'postgres' ],
+ [ 'vacuumdb', '--analyze-only', '--table' => 'vactable(b)', 'postgres' ],
qr/statement: ANALYZE public.vactable\(b\);/,
'vacuumdb --analyze-only with partial column list');
$node->command_checks_all(
- [ 'vacuumdb', '--analyze', '--table', 'vacview', 'postgres' ],
+ [ 'vacuumdb', '--analyze', '--table' => 'vacview', 'postgres' ],
0,
[qr/^.*vacuuming database "postgres"/],
[qr/^WARNING.*cannot vacuum non-tables or special system tables/s],
'vacuumdb with view');
$node->command_fails(
- [ 'vacuumdb', '--table', 'vactable', '--min-mxid-age', '0', 'postgres' ],
+ [
+ 'vacuumdb',
+ '--table' => 'vactable',
+ '--min-mxid-age' => '0',
+ 'postgres'
+ ],
'vacuumdb --min-mxid-age with incorrect value');
$node->command_fails(
- [ 'vacuumdb', '--table', 'vactable', '--min-xid-age', '0', 'postgres' ],
+ [
+ 'vacuumdb',
+ '--table' => 'vactable',
+ '--min-xid-age' => '0',
+ 'postgres'
+ ],
'vacuumdb --min-xid-age with incorrect value');
$node->issues_sql_like(
[
- 'vacuumdb', '--table', 'vactable', '--min-mxid-age',
- '2147483000', 'postgres'
+ 'vacuumdb',
+ '--table' => 'vactable',
+ '--min-mxid-age' => '2147483000',
+ 'postgres'
],
qr/GREATEST.*relminmxid.*2147483000/,
'vacuumdb --table --min-mxid-age');
$node->issues_sql_like(
- [ 'vacuumdb', '--min-xid-age', '2147483001', 'postgres' ],
+ [ 'vacuumdb', '--min-xid-age' => '2147483001', 'postgres' ],
qr/GREATEST.*relfrozenxid.*2147483001/,
'vacuumdb --table --min-xid-age');
$node->issues_sql_like(
- [ 'vacuumdb', '--schema', '"Foo"', 'postgres' ],
+ [ 'vacuumdb', '--schema' => '"Foo"', 'postgres' ],
qr/VACUUM \(SKIP_DATABASE_STATS\) "Foo".bar/,
'vacuumdb --schema');
$node->issues_sql_like(
- [ 'vacuumdb', '--schema', '"Foo"', '--schema', '"Bar"', 'postgres' ],
+ [ 'vacuumdb', '--schema' => '"Foo"', '--schema' => '"Bar"', 'postgres' ],
qr/VACUUM\ \(SKIP_DATABASE_STATS\)\ "Foo".bar
.*VACUUM\ \(SKIP_DATABASE_STATS\)\ "Bar".baz
/sx,
'vacuumdb multiple --schema switches');
$node->issues_sql_like(
- [ 'vacuumdb', '--exclude-schema', '"Foo"', 'postgres' ],
+ [ 'vacuumdb', '--exclude-schema' => '"Foo"', 'postgres' ],
qr/^(?!.*VACUUM \(SKIP_DATABASE_STATS\) "Foo".bar).*$/s,
'vacuumdb --exclude-schema');
$node->issues_sql_like(
[
- 'vacuumdb', '--exclude-schema', '"Foo"', '--exclude-schema',
- '"Bar"', 'postgres'
+ 'vacuumdb',
+ '--exclude-schema' => '"Foo"',
+ '--exclude-schema' => '"Bar"',
+ 'postgres'
],
qr/^(?!.*VACUUM\ \(SKIP_DATABASE_STATS\)\ "Foo".bar
| VACUUM\ \(SKIP_DATABASE_STATS\)\ "Bar".baz).*$/sx,
'vacuumdb multiple --exclude-schema switches');
$node->command_fails_like(
- [ 'vacuumdb', '-N', 'pg_catalog', '-t', 'pg_class', 'postgres', ],
+ [
+ 'vacuumdb',
+ '--exclude-schema' => 'pg_catalog',
+ '--table' => 'pg_class',
+ 'postgres',
+ ],
qr/cannot vacuum specific table\(s\) and exclude schema\(s\) at the same time/,
- 'cannot use options -N and -t at the same time');
+ 'cannot use options --exclude-schema and --table at the same time');
$node->command_fails_like(
- [ 'vacuumdb', '-n', 'pg_catalog', '-t', 'pg_class', 'postgres' ],
+ [
+ 'vacuumdb',
+ '--schema' => 'pg_catalog',
+ '--table' => 'pg_class',
+ 'postgres'
+ ],
qr/cannot vacuum all tables in schema\(s\) and specific table\(s\) at the same time/,
- 'cannot use options -n and -t at the same time');
+ 'cannot use options --schema and --table at the same time');
$node->command_fails_like(
- [ 'vacuumdb', '-n', 'pg_catalog', '-N', '"Foo"', 'postgres' ],
+ [
+ 'vacuumdb',
+ '--schema' => 'pg_catalog',
+ '--exclude-schema' => '"Foo"',
+ 'postgres'
+ ],
qr/cannot vacuum all tables in schema\(s\) and exclude schema\(s\) at the same time/,
- 'cannot use options -n and -N at the same time');
+ 'cannot use options --schema and --exclude-schema at the same time');
$node->issues_sql_like(
- [ 'vacuumdb', '-a', '-N', 'pg_catalog' ],
+ [ 'vacuumdb', '--all', '--exclude-schema' => 'pg_catalog' ],
qr/(?:(?!VACUUM \(SKIP_DATABASE_STATS\) pg_catalog.pg_class).)*/,
- 'vacuumdb -a -N');
+ 'vacuumdb --all --exclude-schema');
$node->issues_sql_like(
- [ 'vacuumdb', '-a', '-n', 'pg_catalog' ],
+ [ 'vacuumdb', '--all', '--schema' => 'pg_catalog' ],
qr/VACUUM \(SKIP_DATABASE_STATS\) pg_catalog.pg_class/,
- 'vacuumdb -a -n');
+ 'vacuumdb --all --schema');
$node->issues_sql_like(
- [ 'vacuumdb', '-a', '-t', 'pg_class' ],
+ [ 'vacuumdb', '--all', '--table' => 'pg_class' ],
qr/VACUUM \(SKIP_DATABASE_STATS\) pg_catalog.pg_class/,
- 'vacuumdb -a -t');
+ 'vacuumdb --all --table');
$node->command_fails_like(
- [ 'vacuumdb', '-a', '-d', 'postgres' ],
+ [ 'vacuumdb', '--all', '--dbname' => 'postgres' ],
qr/cannot vacuum all databases and a specific one at the same time/,
- 'cannot use options -a and -d at the same time');
+ 'cannot use options --all and --dbname at the same time');
$node->command_fails_like(
- [ 'vacuumdb', '-a', 'postgres' ],
+ [ 'vacuumdb', '--all', 'postgres' ],
qr/cannot vacuum all databases and a specific one at the same time/,
- 'cannot use option -a and a dbname as argument at the same time');
+ 'cannot use option --all and a dbname as argument at the same time');
done_testing();
$node->start;
$node->issues_sql_like(
- [ 'vacuumdb', '-a' ],
+ [ 'vacuumdb', '--all' ],
qr/statement: VACUUM.*statement: VACUUM/s,
'vacuum all databases');
CREATE DATABASE regression_invalid;
UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid';
));
-$node->command_ok([ 'vacuumdb', '-a' ],
+$node->command_ok([ 'vacuumdb', '--all' ],
'invalid database not targeted by vacuumdb -a');
# Doesn't quite belong here, but don't want to waste time by creating an
# invalid database in 010_vacuumdb.pl as well.
$node->command_fails_like(
- [ 'vacuumdb', '-d', 'regression_invalid' ],
+ [ 'vacuumdb', '--dbname' => 'regression_invalid' ],
qr/FATAL: cannot connect to invalid database "regression_invalid"/,
'vacuumdb cannot target invalid database');
# a replication command and a SQL command.
$node_primary->command_fails_like(
[
- 'psql', '-X', '-c', "SELECT pg_backup_start('backup', true)",
- '-c', 'BASE_BACKUP', '-d', $connstr
+ 'psql',
+ '--no-psqlrc',
+ '--command' => "SELECT pg_backup_start('backup', true)",
+ '--command' => 'BASE_BACKUP',
+ '--dbname' => $connstr
],
qr/a backup is already in progress in this session/,
'BASE_BACKUP cannot run in session already running backup');
# Bump the transaction ID epoch. This is useful to stress the portability
# of recovery_target_xid parsing.
-system_or_bail('pg_resetwal', '--epoch', '1', $node_primary->data_dir);
+system_or_bail('pg_resetwal', '--epoch' => '1', $node_primary->data_dir);
# Start it
$node_primary->start;
my $res = run_log(
[
- 'pg_ctl', '-D', $node_standby->data_dir, '-l',
- $node_standby->logfile, 'start'
+ 'pg_ctl',
+ '--pgdata' => $node_standby->data_dir,
+ '--log' => $node_standby->logfile,
+ 'start',
]);
ok(!$res, 'invalid recovery startup fails');
run_log(
[
- 'pg_ctl', '-D', $node_standby->data_dir, '-l',
- $node_standby->logfile, 'start'
+ 'pg_ctl',
+ '--pgdata' => $node_standby->data_dir,
+ '--log' => $node_standby->logfile,
+ 'start',
]);
# wait for postgres to terminate
like(slurp_file($gnat->logfile),
$pre_existing_msg, 'detected live backend via shared memory');
# Reject single-user startup.
-my $single_stderr;
-ok( !run_log(
- [ 'postgres', '--single', '-D', $gnat->data_dir, 'template1' ],
- '<', \undef, '2>', \$single_stderr),
- 'live query blocks --single');
-print STDERR $single_stderr;
-like($single_stderr, $pre_existing_msg,
+command_fails_like(
+ [
+ 'postgres', '--single',
+ '-D' => $gnat->data_dir,
+ 'template1'
+ ],
+ $pre_existing_msg,
'single-user mode detected live backend via shared memory');
log_ipcs();
# that the server ends with an error during recovery.
run_log(
[
- 'pg_ctl', '-D',
- $recovery_node->data_dir, '-l',
- $recovery_node->logfile, 'start'
+ 'pg_ctl',
+ '--pgdata' => $recovery_node->data_dir,
+ '--log' => $recovery_node->logfile,
+ 'start',
]);
# wait for postgres to terminate
# Perform a logical dump of primary and standby, and check that they match
command_ok(
[
- 'pg_dumpall', '-f', $outputdir . '/primary.dump',
- '--no-sync', '-p', $node_primary->port,
- '--no-unlogged-table-data' # if unlogged, standby has schema only
+ 'pg_dumpall',
+ '--file' => $outputdir . '/primary.dump',
+ '--no-sync',
+ '--port' => $node_primary->port,
+ '--no-unlogged-table-data', # if unlogged, standby has schema only
],
'dump primary server');
command_ok(
[
- 'pg_dumpall', '-f', $outputdir . '/standby.dump',
- '--no-sync', '-p', $node_standby_1->port
+ 'pg_dumpall',
+ '--file' => $outputdir . '/standby.dump',
+ '--no-sync',
+ '--port' => $node_standby_1->port,
],
'dump standby server');
command_ok(
- [ 'diff', $outputdir . '/primary.dump', $outputdir . '/standby.dump' ],
+ [ 'diff', $outputdir . '/primary.dump', $outputdir . '/standby.dump', ],
'compare primary and standby dumps');
# Likewise for the catalogs of the regression database, after disabling
command_ok(
[
'pg_dump',
- ('--schema', 'pg_catalog'),
- ('-f', $outputdir . '/catalogs_primary.dump'),
+ '--schema' => 'pg_catalog',
+ '--file' => $outputdir . '/catalogs_primary.dump',
'--no-sync',
- ('-p', $node_primary->port),
+ '--port' => $node_primary->port,
'--no-unlogged-table-data',
- 'regression'
+ 'regression',
],
'dump catalogs of primary server');
command_ok(
[
'pg_dump',
- ('--schema', 'pg_catalog'),
- ('-f', $outputdir . '/catalogs_standby.dump'),
+ '--schema' => 'pg_catalog',
+ '--file' => $outputdir . '/catalogs_standby.dump',
'--no-sync',
- ('-p', $node_standby_1->port),
- 'regression'
+ '--port' => $node_standby_1->port,
+ 'regression',
],
'dump catalogs of standby server');
command_ok(
[
'diff',
$outputdir . '/catalogs_primary.dump',
- $outputdir . '/catalogs_standby.dump'
+ $outputdir . '/catalogs_standby.dump',
],
'compare primary and standby catalog dumps');
# pg_stat_ssl
command_like(
[
- 'psql', '-X',
- '-A', '-F',
- ',', '-P',
- 'null=_null_', '-d',
- "$common_connstr sslrootcert=invalid", '-c',
- "SELECT * FROM pg_stat_ssl WHERE pid = pg_backend_pid()"
+ 'psql',
+ '--no-psqlrc',
+ '--no-align',
+ '--field-separator' => ',',
+ '--pset' => 'null=_null_',
+ '--dbname' => "$common_connstr sslrootcert=invalid",
+ '--command' =>
+ "SELECT * FROM pg_stat_ssl WHERE pid = pg_backend_pid()"
],
qr{^pid,ssl,version,cipher,bits,client_dn,client_serial,issuer_dn\r?\n
^\d+,t,TLSv[\d.]+,[\w-]+,\d+,_null_,_null_,_null_\r?$}mx,
command_like(
[
'psql',
- '-X',
- '-A',
- '-F',
- ',',
- '-P',
- 'null=_null_',
- '-d',
- "$common_connstr user=ssltestuser sslcert=ssl/client.crt "
+ '--no-psqlrc',
+ '--no-align',
+ '--field-separator' => ',',
+ '--pset' => 'null=_null_',
+ '--dbname' =>
+ "$common_connstr user=ssltestuser sslcert=ssl/client.crt "
. sslkey('client.key'),
- '-c',
- "SELECT * FROM pg_stat_ssl WHERE pid = pg_backend_pid()"
+ '--command' =>
+ "SELECT * FROM pg_stat_ssl WHERE pid = pg_backend_pid()"
],
qr{^pid,ssl,version,cipher,bits,client_dn,client_serial,issuer_dn\r?\n
^\d+,t,TLSv[\d.]+,[\w-]+,\d+,/?CN=ssltestuser,$serialno,/?\QCN=Test CA for PostgreSQL SSL regression test client certs\E\r?$}mx,