MTEST_ARGS: --print-errorlogs --no-rebuild -C build
PGCTLTIMEOUT: 120 # avoids spurious failures during parallel tests
TEMP_CONFIG: ${CIRRUS_WORKING_DIR}/src/tools/ci/pg_ci_base.conf
- PG_TEST_EXTRA: kerberos ldap ssl libpq_encryption load_balance
+ PG_TEST_EXTRA: kerberos ldap ssl libpq_encryption load_balance oauth
# What files to preserve in case tests fail
chown root:postgres /tmp/cores
sysctl kern.corefile='/tmp/cores/%N.%P.core'
setup_additional_packages_script: |
- #pkg install -y ...
+ pkg install -y curl
# NB: Intentionally build without -Dllvm. The freebsd image size is already
# large enough to make VM startup slow, and even without llvm freebsd
--with-gssapi
--with-icu
--with-ldap
+ --with-libcurl
--with-libxml
--with-libxslt
--with-llvm
EOF
setup_additional_packages_script: |
- #apt-get update
- #DEBIAN_FRONTEND=noninteractive apt-get -y install ...
+ apt-get update
+ DEBIAN_FRONTEND=noninteractive apt-get -y install \
+ libcurl4-openssl-dev \
+ libcurl4-openssl-dev:i386 \
matrix:
- name: Linux - Debian Bookworm - Autoconf
folder: $CCACHE_DIR
setup_additional_packages_script: |
- #apt-get update
- #DEBIAN_FRONTEND=noninteractive apt-get -y install ...
+ apt-get update
+ DEBIAN_FRONTEND=noninteractive apt-get -y install libcurl4-openssl-dev
###
# Test that code can be built with gcc/clang without warnings
AC_SUBST(STRIP_STATIC_LIB)
AC_SUBST(STRIP_SHARED_LIB)
])# PGAC_CHECK_STRIP
+
+
+
+# PGAC_CHECK_LIBCURL
+# ------------------
+# Check for required libraries and headers, and test to see whether the current
+# installation of libcurl is thread-safe.
+
+AC_DEFUN([PGAC_CHECK_LIBCURL],
+[
+ AC_CHECK_HEADER(curl/curl.h, [],
+ [AC_MSG_ERROR([header file <curl/curl.h> is required for --with-libcurl])])
+ AC_CHECK_LIB(curl, curl_multi_init, [],
+ [AC_MSG_ERROR([library 'curl' does not provide curl_multi_init])])
+
+ # Check to see whether the current platform supports threadsafe Curl
+ # initialization.
+ AC_CACHE_CHECK([for curl_global_init thread safety], [pgac_cv__libcurl_threadsafe_init],
+ [AC_RUN_IFELSE([AC_LANG_PROGRAM([
+#include <curl/curl.h>
+],[
+ curl_version_info_data *info;
+
+ if (curl_global_init(CURL_GLOBAL_ALL))
+ return -1;
+
+ info = curl_version_info(CURLVERSION_NOW);
+#ifdef CURL_VERSION_THREADSAFE
+ if (info->features & CURL_VERSION_THREADSAFE)
+ return 0;
+#endif
+
+ return 1;
+])],
+ [pgac_cv__libcurl_threadsafe_init=yes],
+ [pgac_cv__libcurl_threadsafe_init=no],
+ [pgac_cv__libcurl_threadsafe_init=unknown])])
+ if test x"$pgac_cv__libcurl_threadsafe_init" = xyes ; then
+ AC_DEFINE(HAVE_THREADSAFE_CURL_GLOBAL_INIT, 1,
+ [Define to 1 if curl_global_init() is guaranteed to be thread-safe.])
+ fi
+
+ # Warn if a thread-friendly DNS resolver isn't built.
+ AC_CACHE_CHECK([for curl support for asynchronous DNS], [pgac_cv__libcurl_async_dns],
+ [AC_RUN_IFELSE([AC_LANG_PROGRAM([
+#include <curl/curl.h>
+],[
+ curl_version_info_data *info;
+
+ if (curl_global_init(CURL_GLOBAL_ALL))
+ return -1;
+
+ info = curl_version_info(CURLVERSION_NOW);
+ return (info->features & CURL_VERSION_ASYNCHDNS) ? 0 : 1;
+])],
+ [pgac_cv__libcurl_async_dns=yes],
+ [pgac_cv__libcurl_async_dns=no],
+ [pgac_cv__libcurl_async_dns=unknown])])
+ if test x"$pgac_cv__libcurl_async_dns" != xyes ; then
+ AC_MSG_WARN([
+*** The installed version of libcurl does not support asynchronous DNS
+*** lookups. Connection timeouts will not be honored during DNS resolution,
+*** which may lead to hangs in client programs.])
+ fi
+])# PGAC_CHECK_LIBCURL
XML2_CFLAGS
XML2_CONFIG
with_libxml
+LIBCURL_LIBS
+LIBCURL_CFLAGS
+with_libcurl
with_uuid
with_readline
with_systemd
with_libedit_preferred
with_uuid
with_ossp_uuid
+with_libcurl
with_libxml
with_libxslt
with_system_tzdata
PKG_CONFIG_LIBDIR
ICU_CFLAGS
ICU_LIBS
+LIBCURL_CFLAGS
+LIBCURL_LIBS
XML2_CONFIG
XML2_CFLAGS
XML2_LIBS
prefer BSD Libedit over GNU Readline
--with-uuid=LIB build contrib/uuid-ossp using LIB (bsd,e2fs,ossp)
--with-ossp-uuid obsolete spelling of --with-uuid=ossp
+ --with-libcurl build with libcurl support
--with-libxml build with XML support
--with-libxslt use XSLT support when building contrib/xml2
--with-system-tzdata=DIR
path overriding pkg-config's built-in search path
ICU_CFLAGS C compiler flags for ICU, overriding pkg-config
ICU_LIBS linker flags for ICU, overriding pkg-config
+ LIBCURL_CFLAGS
+ C compiler flags for LIBCURL, overriding pkg-config
+ LIBCURL_LIBS
+ linker flags for LIBCURL, overriding pkg-config
XML2_CONFIG path to xml2-config utility
XML2_CFLAGS C compiler flags for XML2, overriding pkg-config
XML2_LIBS linker flags for XML2, overriding pkg-config
+#
+# libcurl
+#
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build with libcurl support" >&5
+$as_echo_n "checking whether to build with libcurl support... " >&6; }
+
+
+
+# Check whether --with-libcurl was given.
+if test "${with_libcurl+set}" = set; then :
+ withval=$with_libcurl;
+ case $withval in
+ yes)
+
+$as_echo "#define USE_LIBCURL 1" >>confdefs.h
+
+ ;;
+ no)
+ :
+ ;;
+ *)
+ as_fn_error $? "no argument expected for --with-libcurl option" "$LINENO" 5
+ ;;
+ esac
+
+else
+ with_libcurl=no
+
+fi
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $with_libcurl" >&5
+$as_echo "$with_libcurl" >&6; }
+
+
+if test "$with_libcurl" = yes ; then
+ # Check for libcurl 7.61.0 or higher (corresponding to RHEL8 and the ability
+ # to explicitly set TLS 1.3 ciphersuites).
+
+pkg_failed=no
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for libcurl >= 7.61.0" >&5
+$as_echo_n "checking for libcurl >= 7.61.0... " >&6; }
+
+if test -n "$LIBCURL_CFLAGS"; then
+ pkg_cv_LIBCURL_CFLAGS="$LIBCURL_CFLAGS"
+ elif test -n "$PKG_CONFIG"; then
+ if test -n "$PKG_CONFIG" && \
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libcurl >= 7.61.0\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "libcurl >= 7.61.0") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ pkg_cv_LIBCURL_CFLAGS=`$PKG_CONFIG --cflags "libcurl >= 7.61.0" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes
+else
+ pkg_failed=yes
+fi
+ else
+ pkg_failed=untried
+fi
+if test -n "$LIBCURL_LIBS"; then
+ pkg_cv_LIBCURL_LIBS="$LIBCURL_LIBS"
+ elif test -n "$PKG_CONFIG"; then
+ if test -n "$PKG_CONFIG" && \
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libcurl >= 7.61.0\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "libcurl >= 7.61.0") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ pkg_cv_LIBCURL_LIBS=`$PKG_CONFIG --libs "libcurl >= 7.61.0" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes
+else
+ pkg_failed=yes
+fi
+ else
+ pkg_failed=untried
+fi
+
+
+
+if test $pkg_failed = yes; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+
+if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
+ _pkg_short_errors_supported=yes
+else
+ _pkg_short_errors_supported=no
+fi
+ if test $_pkg_short_errors_supported = yes; then
+ LIBCURL_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "libcurl >= 7.61.0" 2>&1`
+ else
+ LIBCURL_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "libcurl >= 7.61.0" 2>&1`
+ fi
+ # Put the nasty error message in config.log where it belongs
+ echo "$LIBCURL_PKG_ERRORS" >&5
+
+ as_fn_error $? "Package requirements (libcurl >= 7.61.0) were not met:
+
+$LIBCURL_PKG_ERRORS
+
+Consider adjusting the PKG_CONFIG_PATH environment variable if you
+installed software in a non-standard prefix.
+
+Alternatively, you may set the environment variables LIBCURL_CFLAGS
+and LIBCURL_LIBS to avoid the need to call pkg-config.
+See the pkg-config man page for more details." "$LINENO" 5
+elif test $pkg_failed = untried; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "The pkg-config script could not be found or is too old. Make sure it
+is in your PATH or set the PKG_CONFIG environment variable to the full
+path to pkg-config.
+
+Alternatively, you may set the environment variables LIBCURL_CFLAGS
+and LIBCURL_LIBS to avoid the need to call pkg-config.
+See the pkg-config man page for more details.
+
+To get pkg-config, see <http://pkg-config.freedesktop.org/>.
+See \`config.log' for more details" "$LINENO" 5; }
+else
+ LIBCURL_CFLAGS=$pkg_cv_LIBCURL_CFLAGS
+ LIBCURL_LIBS=$pkg_cv_LIBCURL_LIBS
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+
+fi
+
+ # We only care about -I, -D, and -L switches;
+ # note that -lcurl will be added by PGAC_CHECK_LIBCURL below.
+ for pgac_option in $LIBCURL_CFLAGS; do
+ case $pgac_option in
+ -I*|-D*) CPPFLAGS="$CPPFLAGS $pgac_option";;
+ esac
+ done
+ for pgac_option in $LIBCURL_LIBS; do
+ case $pgac_option in
+ -L*) LDFLAGS="$LDFLAGS $pgac_option";;
+ esac
+ done
+
+ # OAuth requires python for testing
+ if test "$with_python" != yes; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: *** OAuth support tests require --with-python to run" >&5
+$as_echo "$as_me: WARNING: *** OAuth support tests require --with-python to run" >&2;}
+ fi
+fi
+
+
#
# XML
#
fi
+# XXX libcurl must link after libgssapi_krb5 on FreeBSD to avoid segfaults
+# during gss_acquire_cred(). This is possibly related to Curl's Heimdal
+# dependency on that platform?
+if test "$with_libcurl" = yes ; then
+
+ ac_fn_c_check_header_mongrel "$LINENO" "curl/curl.h" "ac_cv_header_curl_curl_h" "$ac_includes_default"
+if test "x$ac_cv_header_curl_curl_h" = xyes; then :
+
+else
+ as_fn_error $? "header file <curl/curl.h> is required for --with-libcurl" "$LINENO" 5
+fi
+
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for curl_multi_init in -lcurl" >&5
+$as_echo_n "checking for curl_multi_init in -lcurl... " >&6; }
+if ${ac_cv_lib_curl_curl_multi_init+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lcurl $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char curl_multi_init ();
+int
+main ()
+{
+return curl_multi_init ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_curl_curl_multi_init=yes
+else
+ ac_cv_lib_curl_curl_multi_init=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_curl_curl_multi_init" >&5
+$as_echo "$ac_cv_lib_curl_curl_multi_init" >&6; }
+if test "x$ac_cv_lib_curl_curl_multi_init" = xyes; then :
+ cat >>confdefs.h <<_ACEOF
+#define HAVE_LIBCURL 1
+_ACEOF
+
+ LIBS="-lcurl $LIBS"
+
+else
+ as_fn_error $? "library 'curl' does not provide curl_multi_init" "$LINENO" 5
+fi
+
+
+ # Check to see whether the current platform supports threadsafe Curl
+ # initialization.
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for curl_global_init thread safety" >&5
+$as_echo_n "checking for curl_global_init thread safety... " >&6; }
+if ${pgac_cv__libcurl_threadsafe_init+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test "$cross_compiling" = yes; then :
+ pgac_cv__libcurl_threadsafe_init=unknown
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+#include <curl/curl.h>
+
+int
+main ()
+{
+
+ curl_version_info_data *info;
+
+ if (curl_global_init(CURL_GLOBAL_ALL))
+ return -1;
+
+ info = curl_version_info(CURLVERSION_NOW);
+#ifdef CURL_VERSION_THREADSAFE
+ if (info->features & CURL_VERSION_THREADSAFE)
+ return 0;
+#endif
+
+ return 1;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_run "$LINENO"; then :
+ pgac_cv__libcurl_threadsafe_init=yes
+else
+ pgac_cv__libcurl_threadsafe_init=no
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+ conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__libcurl_threadsafe_init" >&5
+$as_echo "$pgac_cv__libcurl_threadsafe_init" >&6; }
+ if test x"$pgac_cv__libcurl_threadsafe_init" = xyes ; then
+
+$as_echo "#define HAVE_THREADSAFE_CURL_GLOBAL_INIT 1" >>confdefs.h
+
+ fi
+
+ # Warn if a thread-friendly DNS resolver isn't built.
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for curl support for asynchronous DNS" >&5
+$as_echo_n "checking for curl support for asynchronous DNS... " >&6; }
+if ${pgac_cv__libcurl_async_dns+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test "$cross_compiling" = yes; then :
+ pgac_cv__libcurl_async_dns=unknown
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+#include <curl/curl.h>
+
+int
+main ()
+{
+
+ curl_version_info_data *info;
+
+ if (curl_global_init(CURL_GLOBAL_ALL))
+ return -1;
+
+ info = curl_version_info(CURLVERSION_NOW);
+ return (info->features & CURL_VERSION_ASYNCHDNS) ? 0 : 1;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_run "$LINENO"; then :
+ pgac_cv__libcurl_async_dns=yes
+else
+ pgac_cv__libcurl_async_dns=no
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+ conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__libcurl_async_dns" >&5
+$as_echo "$pgac_cv__libcurl_async_dns" >&6; }
+ if test x"$pgac_cv__libcurl_async_dns" != xyes ; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING:
+*** The installed version of libcurl does not support asynchronous DNS
+*** lookups. Connection timeouts will not be honored during DNS resolution,
+*** which may lead to hangs in client programs." >&5
+$as_echo "$as_me: WARNING:
+*** The installed version of libcurl does not support asynchronous DNS
+*** lookups. Connection timeouts will not be honored during DNS resolution,
+*** which may lead to hangs in client programs." >&2;}
+ fi
+
+fi
+
if test "$with_gssapi" = yes ; then
if test "$PORTNAME" != "win32"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing gss_store_cred_into" >&5
AC_SUBST(with_uuid)
+#
+# libcurl
+#
+AC_MSG_CHECKING([whether to build with libcurl support])
+PGAC_ARG_BOOL(with, libcurl, no, [build with libcurl support],
+ [AC_DEFINE([USE_LIBCURL], 1, [Define to 1 to build with libcurl support. (--with-libcurl)])])
+AC_MSG_RESULT([$with_libcurl])
+AC_SUBST(with_libcurl)
+
+if test "$with_libcurl" = yes ; then
+ # Check for libcurl 7.61.0 or higher (corresponding to RHEL8 and the ability
+ # to explicitly set TLS 1.3 ciphersuites).
+ PKG_CHECK_MODULES(LIBCURL, [libcurl >= 7.61.0])
+
+ # We only care about -I, -D, and -L switches;
+ # note that -lcurl will be added by PGAC_CHECK_LIBCURL below.
+ for pgac_option in $LIBCURL_CFLAGS; do
+ case $pgac_option in
+ -I*|-D*) CPPFLAGS="$CPPFLAGS $pgac_option";;
+ esac
+ done
+ for pgac_option in $LIBCURL_LIBS; do
+ case $pgac_option in
+ -L*) LDFLAGS="$LDFLAGS $pgac_option";;
+ esac
+ done
+
+ # OAuth requires python for testing
+ if test "$with_python" != yes; then
+ AC_MSG_WARN([*** OAuth support tests require --with-python to run])
+ fi
+fi
+
+
#
# XML
#
Use --without-zlib to disable zlib support.])])
fi
+# XXX libcurl must link after libgssapi_krb5 on FreeBSD to avoid segfaults
+# during gss_acquire_cred(). This is possibly related to Curl's Heimdal
+# dependency on that platform?
+if test "$with_libcurl" = yes ; then
+ PGAC_CHECK_LIBCURL
+fi
+
if test "$with_gssapi" = yes ; then
if test "$PORTNAME" != "win32"; then
AC_SEARCH_LIBS(gss_store_cred_into, [gssapi_krb5 gss 'gssapi -lkrb5 -lcrypto'], [],
</para>
</listitem>
</varlistentry>
+
+ <varlistentry>
+ <term><literal>oauth</literal></term>
+ <listitem>
+ <para>
+ Authorize and optionally authenticate using a third-party OAuth 2.0
+ identity provider. See <xref linkend="auth-oauth"/> for details.
+ </para>
+ </listitem>
+ </varlistentry>
</variablelist>
</para>
only on OpenBSD).
</para>
</listitem>
+ <listitem>
+ <para>
+ <link linkend="auth-oauth">OAuth authorization/authentication</link>,
+ which relies on an external OAuth 2.0 identity provider.
+ </para>
+ </listitem>
</itemizedlist>
</para>
</note>
</sect1>
+ <sect1 id="auth-oauth">
+ <title>OAuth Authorization/Authentication</title>
+
+ <indexterm zone="auth-oauth">
+ <primary>OAuth Authorization/Authentication</primary>
+ </indexterm>
+
+ <para>
+ OAuth 2.0 is an industry-standard framework, defined in
+ <ulink url="https://datatracker.ietf.org/doc/html/rfc6749">RFC 6749</ulink>,
+ to enable third-party applications to obtain limited access to a protected
+ resource.
+
+    OAuth client support has to be enabled when
+    <productname>PostgreSQL</productname> is built; see
+    <xref linkend="installation"/> for more information.
+ </para>
+
+ <para>
+ This documentation uses the following terminology when discussing the OAuth
+ ecosystem:
+
+ <variablelist>
+
+ <varlistentry>
+ <term>Resource Owner (or End User)</term>
+ <listitem>
+ <para>
+ The user or system who owns protected resources and can grant access to
+ them. This documentation also uses the term <emphasis>end user</emphasis>
+ when the resource owner is a person. When you use
+ <application>psql</application> to connect to the database using OAuth,
+ you are the resource owner/end user.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>Client</term>
+ <listitem>
+ <para>
+ The system which accesses the protected resources using access
+ tokens. Applications using libpq, such as <application>psql</application>,
+ are the OAuth clients when connecting to a
+ <productname>PostgreSQL</productname> cluster.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>Resource Server</term>
+ <listitem>
+ <para>
+ The system hosting the protected resources which are
+ accessed by the client. The <productname>PostgreSQL</productname>
+ cluster being connected to is the resource server.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>Provider</term>
+ <listitem>
+ <para>
+ The organization, product vendor, or other entity which develops and/or
+ administers the OAuth authorization servers and clients for a given application.
+ Different providers typically choose different implementation details
+ for their OAuth systems; a client of one provider is not generally
+ guaranteed to have access to the servers of another.
+ </para>
+ <para>
+ This use of the term "provider" is not standard, but it seems to be in
+ wide use colloquially. (It should not be confused with OpenID's similar
+ term "Identity Provider". While the implementation of OAuth in
+ <productname>PostgreSQL</productname> is intended to be interoperable
+ and compatible with OpenID Connect/OIDC, it is not itself an OIDC client
+ and does not require its use.)
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>Authorization Server</term>
+ <listitem>
+ <para>
+ The system which receives requests from, and issues access tokens to,
+ the client after the authenticated resource owner has given approval.
+ <productname>PostgreSQL</productname> does not provide an authorization
+ server; it is the responsibility of the OAuth provider.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term id="auth-oauth-issuer">Issuer</term>
+ <listitem>
+ <para>
+ An identifier for an authorization server, printed as an
+ <literal>https://</literal> URL, which provides a trusted "namespace"
+ for OAuth clients and applications. The issuer identifier allows a
+ single authorization server to talk to the clients of mutually
+ untrusting entities, as long as they maintain separate issuers.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ </variablelist>
+
+ <note>
+ <para>
+ For small deployments, there may not be a meaningful distinction between
+ the "provider", "authorization server", and "issuer". However, for more
+ complicated setups, there may be a one-to-many (or many-to-many)
+ relationship: a provider may rent out multiple issuer identifiers to
+ separate tenants, then provide multiple authorization servers, possibly
+ with different supported feature sets, to interact with their clients.
+ </para>
+ </note>
+ </para>
+
+ <para>
+ <productname>PostgreSQL</productname> supports bearer tokens, defined in
+ <ulink url="https://datatracker.ietf.org/doc/html/rfc6750">RFC 6750</ulink>,
+ which are a type of access token used with OAuth 2.0 where the token is an
+ opaque string. The format of the access token is implementation specific
+ and is chosen by each authorization server.
+ </para>
+
+ <para>
+ The following configuration options are supported for OAuth:
+ <variablelist>
+ <varlistentry>
+ <term><literal>issuer</literal></term>
+ <listitem>
+ <para>
+ An HTTPS URL which is either the exact
+ <link linkend="auth-oauth-issuer">issuer identifier</link> of the
+ authorization server, as defined by its discovery document, or a
+ well-known URI that points directly to that discovery document. This
+ parameter is required.
+ </para>
+ <para>
+ When an OAuth client connects to the server, a URL for the discovery
+ document will be constructed using the issuer identifier. By default,
+ this URL uses the conventions of OpenID Connect Discovery: the path
+ <literal>/.well-known/openid-configuration</literal> will be appended
+ to the end of the issuer identifier. Alternatively, if the
+ <literal>issuer</literal> contains a <literal>/.well-known/</literal>
+ path segment, that URL will be provided to the client as-is.
+ </para>
+ <warning>
+ <para>
+ The OAuth client in libpq requires the server's issuer setting to
+ exactly match the issuer identifier which is provided in the discovery
+ document, which must in turn match the client's
+ <xref linkend="libpq-connect-oauth-issuer"/> setting. No variations in
+ case or formatting are permitted.
+ </para>
+ </warning>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><literal>scope</literal></term>
+ <listitem>
+ <para>
+ A space-separated list of the OAuth scopes needed for the server to
+ both authorize the client and authenticate the user. Appropriate values
+ are determined by the authorization server and the OAuth validation
+ module used (see <xref linkend="oauth-validators" /> for more
+ information on validators). This parameter is required.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><literal>validator</literal></term>
+ <listitem>
+ <para>
+ The library to use for validating bearer tokens. If given, the name must
+ exactly match one of the libraries listed in
+ <xref linkend="guc-oauth-validator-libraries" />. This parameter is
+ optional unless <literal>oauth_validator_libraries</literal> contains
+ more than one library, in which case it is required.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><literal>map</literal></term>
+ <listitem>
+ <para>
+        Allows for mapping between OAuth user identities and database user
+        names. See <xref linkend="auth-username-maps"/> for details. If a
+ map is not specified, the user name associated with the token (as
+ determined by the OAuth validator) must exactly match the role name
+ being requested. This parameter is optional.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term id="auth-oauth-delegate-ident-mapping" xreflabel="delegate_ident_mapping">
+ <literal>delegate_ident_mapping</literal>
+ </term>
+ <listitem>
+ <para>
+ An advanced option which is not intended for common use.
+ </para>
+ <para>
+ When set to <literal>1</literal>, standard user mapping with
+ <filename>pg_ident.conf</filename> is skipped, and the OAuth validator
+ takes full responsibility for mapping end user identities to database
+ roles. If the validator authorizes the token, the server trusts that
+ the user is allowed to connect under the requested role, and the
+ connection is allowed to proceed regardless of the authentication
+ status of the user.
+ </para>
+ <para>
+ This parameter is incompatible with <literal>map</literal>.
+ </para>
+ <warning>
+ <para>
+ <literal>delegate_ident_mapping</literal> provides additional
+ flexibility in the design of the authentication system, but it also
+ requires careful implementation of the OAuth validator, which must
+ determine whether the provided token carries sufficient end-user
+ privileges in addition to the <link linkend="oauth-validators">standard
+ checks</link> required of all validators. Use with caution.
+ </para>
+ </warning>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
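+
+  <para>
+   For example, an HBA entry permitting OAuth connections might look like the
+   following sketch (the issuer and scope values here are entirely
+   hypothetical; the scopes required by your provider and validator will
+   differ):
+<programlisting>
+host all all all oauth issuer="https://issuer.example.com" scope="openid postgres"
+</programlisting>
+  </para>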
+ </sect1>
+
<sect1 id="client-authentication-problems">
<title>Authentication Problems</title>
</para>
</listitem>
</varlistentry>
+
+ <varlistentry id="guc-oauth-validator-libraries" xreflabel="oauth_validator_libraries">
+ <term><varname>oauth_validator_libraries</varname> (<type>string</type>)
+ <indexterm>
+ <primary><varname>oauth_validator_libraries</varname> configuration parameter</primary>
+ </indexterm>
+ </term>
+ <listitem>
+ <para>
+ The library/libraries to use for validating OAuth connection tokens. If
+ only one validator library is provided, it will be used by default for
+ any OAuth connections; otherwise, all
+ <link linkend="auth-oauth"><literal>oauth</literal> HBA entries</link>
+ must explicitly set a <literal>validator</literal> chosen from this
+ list. If set to an empty string (the default), OAuth connections will be
+ refused. This parameter can only be set in the
+ <filename>postgresql.conf</filename> file.
+ </para>
+ <para>
+ Validator modules must be implemented/obtained separately;
+ <productname>PostgreSQL</productname> does not ship with any default
+ implementations. For more information on implementing OAuth validators,
+ see <xref linkend="oauth-validators" />.
+ </para>
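+      <para>
+       As an illustration, a hypothetical installation using a single
+       validator module might set:
+<programlisting>
+oauth_validator_libraries = 'my_oauth_validator'
+</programlisting>
+      </para>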
+ </listitem>
+ </varlistentry>
</variablelist>
</sect2>
<!ENTITY generic-wal SYSTEM "generic-wal.sgml">
<!ENTITY custom-rmgr SYSTEM "custom-rmgr.sgml">
<!ENTITY backup-manifest SYSTEM "backup-manifest.sgml">
+<!ENTITY oauth-validators SYSTEM "oauth-validators.sgml">
<!-- contrib information -->
<!ENTITY contrib SYSTEM "contrib.sgml">
</listitem>
</varlistentry>
+ <varlistentry id="configure-option-with-libcurl">
+ <term><option>--with-libcurl</option></term>
+ <listitem>
+ <para>
+ Build with libcurl support for OAuth 2.0 client flows.
+ Libcurl version 7.61.0 or later is required for this feature.
+ Building with this will check for the required header files
+ and libraries to make sure that your <productname>curl</productname>
+ installation is sufficient before proceeding.
+ </para>
+ </listitem>
+ </varlistentry>
+
<varlistentry id="configure-option-with-libxml">
<term><option>--with-libxml</option></term>
<listitem>
</listitem>
</varlistentry>
+ <varlistentry id="configure-with-libcurl-meson">
+ <term><option>-Dlibcurl={ auto | enabled | disabled }</option></term>
+ <listitem>
+ <para>
+ Build with libcurl support for OAuth 2.0 client flows.
+ Libcurl version 7.61.0 or later is required for this feature.
+ Building with this will check for the required header files
+ and libraries to make sure that your <productname>Curl</productname>
+ installation is sufficient before proceeding. The default for this
+ option is auto.
+ </para>
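+       <para>
+        For example, a hypothetical invocation that requires the feature at
+        build time (the build directory name is arbitrary) could be:
+<screen>
+meson setup build -Dlibcurl=enabled
+</screen>
+       </para>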
+ </listitem>
+ </varlistentry>
+
<varlistentry id="configure-with-libxml-meson">
<term><option>-Dlibxml={ auto | enabled | disabled }</option></term>
<listitem>
</listitem>
</varlistentry>
+ <varlistentry>
+ <term><literal>oauth</literal></term>
+ <listitem>
+ <para>
+ The server must request an OAuth bearer token from the client.
+ </para>
+ </listitem>
+ </varlistentry>
+
<varlistentry>
<term><literal>none</literal></term>
<listitem>
</para>
</listitem>
</varlistentry>
+
+ <varlistentry id="libpq-connect-oauth-issuer" xreflabel="oauth_issuer">
+ <term><literal>oauth_issuer</literal></term>
+ <listitem>
+ <para>
+ The HTTPS URL of a trusted issuer to contact if the server requests an
+ OAuth token for the connection. This parameter is required for all OAuth
+ connections; it should exactly match the <literal>issuer</literal>
+ setting in <link linkend="auth-oauth">the server's HBA configuration</link>.
+ </para>
+ <para>
+ As part of the standard authentication handshake, <application>libpq</application>
+ will ask the server for a <emphasis>discovery document:</emphasis> a URL
+ providing a set of OAuth configuration parameters. The server must
+ provide a URL that is directly constructed from the components of the
+ <literal>oauth_issuer</literal>, and this value must exactly match the
+ issuer identifier that is declared in the discovery document itself, or
+ the connection will fail. This is required to prevent a class of
+ <ulink url="https://mailarchive.ietf.org/arch/msg/oauth/JIVxFBGsJBVtm7ljwJhPUm3Fr-w/">
+ "mix-up attacks"</ulink> on OAuth clients.
+ </para>
+ <para>
+ You may also explicitly set <literal>oauth_issuer</literal> to the
+ <literal>/.well-known/</literal> URI used for OAuth discovery. In this
+ case, if the server asks for a different URL, the connection will fail,
+ but a <link linkend="libpq-oauth-authdata-hooks">custom OAuth flow</link>
+ may be able to speed up the standard handshake by using previously
+ cached tokens. (In this case, it is recommended that
+ <xref linkend="libpq-connect-oauth-scope"/> be set as well, since the
+ client will not have a chance to ask the server for a correct scope
+ setting, and the default scopes for a token may not be sufficient to
+ connect.) <application>libpq</application> currently supports the
+ following well-known endpoints:
+ <itemizedlist spacing="compact">
+ <listitem><para><literal>/.well-known/openid-configuration</literal></para></listitem>
+ <listitem><para><literal>/.well-known/oauth-authorization-server</literal></para></listitem>
+ </itemizedlist>
+ </para>
+ <warning>
+ <para>
+ Issuers are highly privileged during the OAuth connection handshake. As
+ a rule of thumb, if you would not trust the operator of a URL to handle
+ access to your servers, or to impersonate you directly, that URL should
+ not be trusted as an <literal>oauth_issuer</literal>.
+ </para>
+ </warning>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry id="libpq-connect-oauth-client-id" xreflabel="oauth_client_id">
+ <term><literal>oauth_client_id</literal></term>
+ <listitem>
+ <para>
+ An OAuth 2.0 client identifier, as issued by the authorization server.
+ If the <productname>PostgreSQL</productname> server
+ <link linkend="auth-oauth">requests an OAuth token</link> for the
+ connection (and if no <link linkend="libpq-oauth-authdata-hooks">custom
+ OAuth hook</link> is installed to provide one), then this parameter must
+ be set; otherwise, the connection will fail.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry id="libpq-connect-oauth-client-secret" xreflabel="oauth_client_secret">
+ <term><literal>oauth_client_secret</literal></term>
+ <listitem>
+ <para>
+ The client password, if any, to use when contacting the OAuth
+ authorization server. Whether this parameter is required or not is
+ determined by the OAuth provider; "public" clients generally do not use
+ a secret, whereas "confidential" clients generally do.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry id="libpq-connect-oauth-scope" xreflabel="oauth_scope">
+ <term><literal>oauth_scope</literal></term>
+ <listitem>
+ <para>
+ The scope of the access request sent to the authorization server,
+ specified as a (possibly empty) space-separated list of OAuth scope
+ identifiers. This parameter is optional and intended for advanced usage.
+ </para>
+ <para>
+ Usually the client will obtain appropriate scope settings from the
+ <productname>PostgreSQL</productname> server. If this parameter is used,
+ the server's requested scope list will be ignored. This can prevent a
+ less-trusted server from requesting inappropriate access scopes from the
+ end user. However, if the client's scope setting does not contain the
+ server's required scopes, the server is likely to reject the issued
+ token, and the connection will fail.
+ </para>
+ <para>
+ The meaning of an empty scope list is provider-dependent. An OAuth
+ authorization server may choose to issue a token with "default scope",
+ whatever that happens to be, or it may reject the token request
+ entirely.
+ </para>
+ </listitem>
+ </varlistentry>
+
</variablelist>
</para>
</sect2>
</sect1>
+ <sect1 id="libpq-oauth">
+ <title>OAuth Support</title>
+
+ <para>
+    libpq implements support for the OAuth 2.0 Device Authorization client flow,
+ documented in
+ <ulink url="https://datatracker.ietf.org/doc/html/rfc8628">RFC 8628</ulink>,
+ which it will attempt to use by default if the server
+ <link linkend="auth-oauth">requests a bearer token</link> during
+    authentication. This flow can be used even if the system running the
+    client application does not have a usable web browser, for example when
+    running a client via <application>SSH</application>. Client applications
+    may implement their own flows instead; see
+    <xref linkend="libpq-oauth-authdata-hooks"/>.
+ </para>
+ <para>
+ The builtin flow will, by default, print a URL to visit and a user code to
+ enter there:
+<programlisting>
+$ psql 'dbname=postgres oauth_issuer=https://example.com oauth_client_id=...'
+Visit https://example.com/device and enter the code: ABCD-EFGH
+</programlisting>
+ (This prompt may be
+ <link linkend="libpq-oauth-authdata-prompt-oauth-device">customized</link>.)
+ The user will then log into their OAuth provider, which will ask whether
+ to allow libpq and the server to perform actions on their behalf. It is always
+ a good idea to carefully review the URL and permissions displayed, to ensure
+ they match expectations, before continuing. Permissions should not be given
+ to untrusted third parties.
+ </para>
+ <para>
+ For an OAuth client flow to be usable, the connection string must at minimum
+ contain <xref linkend="libpq-connect-oauth-issuer"/> and
+ <xref linkend="libpq-connect-oauth-client-id"/>. (These settings are
+ determined by your organization's OAuth provider.) The builtin flow
+ additionally requires the OAuth authorization server to publish a device
+ authorization endpoint.
+ </para>
+
+ <note>
+ <para>
+ The builtin Device Authorization flow is not currently supported on Windows.
+ Custom client flows may still be implemented.
+ </para>
+ </note>
+
+ <sect2 id="libpq-oauth-authdata-hooks">
+ <title>Authdata Hooks</title>
+
+ <para>
+ The behavior of the OAuth flow may be modified or replaced by a client using
+ the following hook API:
+
+ <variablelist>
+ <varlistentry id="libpq-PQsetAuthDataHook">
+ <term><function>PQsetAuthDataHook</function><indexterm><primary>PQsetAuthDataHook</primary></indexterm></term>
+
+ <listitem>
+ <para>
+ Sets the <symbol>PGauthDataHook</symbol>, overriding
+ <application>libpq</application>'s handling of one or more aspects of
+ its OAuth client flow.
+<synopsis>
+void PQsetAuthDataHook(PQauthDataHook_type hook);
+</synopsis>
+ If <replaceable>hook</replaceable> is <literal>NULL</literal>, the
+ default handler will be reinstalled. Otherwise, the application passes
+ a pointer to a callback function with the signature:
+<programlisting>
+int hook_fn(PGauthData type, PGconn *conn, void *data);
+</programlisting>
+ which <application>libpq</application> will call when an action is
+ required of the application. <replaceable>type</replaceable> describes
+ the request being made, <replaceable>conn</replaceable> is the
+ connection handle being authenticated, and <replaceable>data</replaceable>
+ points to request-specific metadata. The contents of this pointer are
+ determined by <replaceable>type</replaceable>; see
+ <xref linkend="libpq-oauth-authdata-hooks-types"/> for the supported
+ list.
+ </para>
+ <para>
+ Hooks can be chained together to allow cooperative and/or fallback
+ behavior. In general, a hook implementation should examine the incoming
+ <replaceable>type</replaceable> (and, potentially, the request metadata
+ and/or the settings for the particular <replaceable>conn</replaceable>
+ in use) to decide whether or not to handle a specific piece of authdata.
+ If not, it should delegate to the previous hook in the chain
+ (retrievable via <function>PQgetAuthDataHook</function>).
+ </para>
+ <para>
+ Success is indicated by returning an integer greater than zero.
+ Returning a negative integer signals an error condition and abandons the
+ connection attempt. (A zero value is reserved for the default
+ implementation.)
+ </para>
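+        <para>
+          As an illustrative sketch (not part of the
+          <application>libpq</application> API), a hook that replaces only the
+          device prompt and delegates all other authdata requests to the
+          previous handler might look like this;
+          <function>install_my_hook</function> is a hypothetical application
+          function:
+<programlisting>
+#include &lt;stdio.h&gt;
+#include &lt;libpq-fe.h&gt;
+
+static PQauthDataHook_type prev_hook;
+
+static int
+my_authdata_hook(PGauthData type, PGconn *conn, void *data)
+{
+    if (type == PQAUTHDATA_PROMPT_OAUTH_DEVICE)
+    {
+        const PGpromptOAuthDevice *prompt = data;
+
+        /* Show the verification URL and user code with our own UI. */
+        printf("Visit %s and enter the code %s\n",
+               prompt->verification_uri, prompt->user_code);
+        return 1;               /* handled successfully */
+    }
+
+    /* Anything else is delegated to the previously installed hook. */
+    return prev_hook(type, conn, data);
+}
+
+/* Illustrative helper, called once by the application at startup. */
+static void
+install_my_hook(void)
+{
+    prev_hook = PQgetAuthDataHook();
+    PQsetAuthDataHook(my_authdata_hook);
+}
+</programlisting>
+        </para>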
+ </listitem>
+ </varlistentry>
+
+ <varlistentry id="libpq-PQgetAuthDataHook">
+ <term><function>PQgetAuthDataHook</function><indexterm><primary>PQgetAuthDataHook</primary></indexterm></term>
+
+ <listitem>
+ <para>
+ Retrieves the current value of <symbol>PGauthDataHook</symbol>.
+<synopsis>
+PQauthDataHook_type PQgetAuthDataHook(void);
+</synopsis>
+ At initialization time (before the first call to
+ <function>PQsetAuthDataHook</function>), this function will return
+ <symbol>PQdefaultAuthDataHook</symbol>.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+
+ <sect3 id="libpq-oauth-authdata-hooks-types">
+ <title>Hook Types</title>
+ <para>
+ The following <symbol>PGauthData</symbol> types and their corresponding
+ <replaceable>data</replaceable> structures are defined:
+
+ <variablelist>
+ <varlistentry id="libpq-oauth-authdata-prompt-oauth-device">
+ <term>
+ <symbol>PQAUTHDATA_PROMPT_OAUTH_DEVICE</symbol>
+ <indexterm><primary>PQAUTHDATA_PROMPT_OAUTH_DEVICE</primary></indexterm>
+ </term>
+ <listitem>
+ <para>
+ Replaces the default user prompt during the builtin device
+ authorization client flow. <replaceable>data</replaceable> points to
+ an instance of <symbol>PGpromptOAuthDevice</symbol>:
+<synopsis>
+typedef struct _PGpromptOAuthDevice
+{
+ const char *verification_uri; /* verification URI to visit */
+ const char *user_code; /* user code to enter */
+ const char *verification_uri_complete; /* optional combination of URI and
+ * code, or NULL */
+ int expires_in; /* seconds until user code expires */
+} PGpromptOAuthDevice;
+</synopsis>
+ </para>
+ <para>
+ The OAuth Device Authorization flow included in <application>libpq</application>
+ requires the end user to visit a URL with a browser, then enter a code
+ which permits <application>libpq</application> to connect to the server
+ on their behalf. The default prompt simply prints the
+ <literal>verification_uri</literal> and <literal>user_code</literal>
+ on standard error. Replacement implementations may display this
+ information using any preferred method, for example with a GUI.
+ </para>
+ <para>
+ This callback is only invoked during the builtin device
+ authorization flow. If the application installs a
+ <link linkend="libpq-oauth-authdata-oauth-bearer-token">custom OAuth
+ flow</link>, this authdata type will not be used.
+ </para>
+ <para>
+ If a non-NULL <structfield>verification_uri_complete</structfield> is
+ provided, it may optionally be used for non-textual verification (for
+ example, by displaying a QR code). The URL and user code should still
+ be displayed to the end user in this case, because the code will be
+ manually confirmed by the provider, and the URL lets users continue
+ even if they can't use the non-textual method. For more information,
+ see section 3.3.1 in
+ <ulink url="https://datatracker.ietf.org/doc/html/rfc8628#section-3.3.1">RFC 8628</ulink>.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry id="libpq-oauth-authdata-oauth-bearer-token">
+ <term>
+ <symbol>PQAUTHDATA_OAUTH_BEARER_TOKEN</symbol>
+ <indexterm><primary>PQAUTHDATA_OAUTH_BEARER_TOKEN</primary></indexterm>
+ </term>
+ <listitem>
+ <para>
+ Replaces the entire OAuth flow with a custom implementation. The hook
+ should either directly return a Bearer token for the current
+ user/issuer/scope combination, if one is available without blocking, or
+ else set up an asynchronous callback to retrieve one.
+ </para>
+ <para>
+ <replaceable>data</replaceable> points to an instance
+ of <symbol>PGoauthBearerRequest</symbol>, which should be filled in
+ by the implementation:
+<synopsis>
+typedef struct _PGoauthBearerRequest
+{
+ /* Hook inputs (constant across all calls) */
+ const char *const openid_configuration; /* OIDC discovery URL */
+ const char *const scope; /* required scope(s), or NULL */
+
+ /* Hook outputs */
+
+ /* Callback implementing a custom asynchronous OAuth flow. */
+ PostgresPollingStatusType (*async) (PGconn *conn,
+ struct _PGoauthBearerRequest *request,
+ SOCKTYPE *altsock);
+
+ /* Callback to clean up custom allocations. */
+ void (*cleanup) (PGconn *conn, struct _PGoauthBearerRequest *request);
+
+ char *token; /* acquired Bearer token */
+ void *user; /* hook-defined allocated data */
+} PGoauthBearerRequest;
+</synopsis>
+ </para>
+ <para>
+ Two pieces of information are provided to the hook by
+ <application>libpq</application>:
+ <replaceable>openid_configuration</replaceable> contains the URL of an
+ OAuth discovery document describing the authorization server's
+ supported flows, and <replaceable>scope</replaceable> contains a
+ (possibly empty) space-separated list of OAuth scopes which are
+ required to access the server. Either or both may be
+ <literal>NULL</literal> to indicate that the information was not
+ discoverable. (In this case, implementations may be able to establish
+ the requirements using some other preconfigured knowledge, or they may
+ choose to fail.)
+ </para>
+ <para>
+ The final output of the hook is <replaceable>token</replaceable>, which
+ must point to a valid Bearer token for use on the connection. (This
+ token should be issued by the
+ <xref linkend="libpq-connect-oauth-issuer"/> and hold the requested
+ scopes, or the connection will be rejected by the server's validator
+ module.) The allocated token string must remain valid until
+ <application>libpq</application> is finished connecting; the hook
+ should set a <replaceable>cleanup</replaceable> callback which will be
+ called when <application>libpq</application> no longer requires it.
+ </para>
+ <para>
+ If an implementation cannot immediately produce a
+ <replaceable>token</replaceable> during the initial call to the hook,
+ it should set the <replaceable>async</replaceable> callback to handle
+ nonblocking communication with the authorization server.
+ <footnote>
+ <para>
+ Performing blocking operations during the
+ <symbol>PQAUTHDATA_OAUTH_BEARER_TOKEN</symbol> hook callback will
+ interfere with nonblocking connection APIs such as
+ <function>PQconnectPoll</function> and prevent concurrent connections
+ from making progress. Applications which only ever use the
+ synchronous connection primitives, such as
+ <function>PQconnectdb</function>, may synchronously retrieve a token
+ during the hook instead of implementing the
+ <replaceable>async</replaceable> callback, but they will necessarily
+ be limited to one connection at a time.
+ </para>
+ </footnote>
+ This will be called to begin the flow immediately upon return from the
+ hook. When the callback cannot make further progress without blocking,
+ it should return either <symbol>PGRES_POLLING_READING</symbol> or
+ <symbol>PGRES_POLLING_WRITING</symbol> after setting
+          <literal>*altsock</literal> to the file descriptor that will be marked
+ ready to read/write when progress can be made again. (This descriptor
+ is then provided to the top-level polling loop via
+ <function>PQsocket()</function>.) Return <symbol>PGRES_POLLING_OK</symbol>
+ after setting <replaceable>token</replaceable> when the flow is
+ complete, or <symbol>PGRES_POLLING_FAILED</symbol> to indicate failure.
+ </para>
+ <para>
+ Implementations may wish to store additional data for bookkeeping
+ across calls to the <replaceable>async</replaceable> and
+ <replaceable>cleanup</replaceable> callbacks. The
+ <replaceable>user</replaceable> pointer is provided for this purpose;
+ <application>libpq</application> will not touch its contents and the
+ application may use it at its convenience. (Remember to free any
+ allocations during token cleanup.)
+ </para>
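+         <para>
+           As an illustrative sketch only: assuming the application has
+           already acquired a suitable token out of band and exported it in a
+           hypothetical <envar>MY_APP_OAUTH_TOKEN</envar> environment
+           variable, a purely synchronous hook could look like this:
+<programlisting>
+#include &lt;stdlib.h&gt;
+#include &lt;string.h&gt;
+#include &lt;libpq-fe.h&gt;
+
+static PQauthDataHook_type previous_hook;
+
+static void
+cleanup_token(PGconn *conn, PGoauthBearerRequest *request)
+{
+    free(request->token);
+    request->token = NULL;
+}
+
+static int
+bearer_hook(PGauthData type, PGconn *conn, void *data)
+{
+    if (type == PQAUTHDATA_OAUTH_BEARER_TOKEN)
+    {
+        PGoauthBearerRequest *request = data;
+
+        /* Hypothetical application-specific token source. */
+        const char *token = getenv("MY_APP_OAUTH_TOKEN");
+
+        if (token == NULL)
+            return -1;          /* no token available; fail the connection */
+
+        request->token = strdup(token);
+        request->cleanup = cleanup_token;
+        return 1;               /* token supplied synchronously */
+    }
+
+    /* Delegate all other authdata requests to the previous hook. */
+    return previous_hook(type, conn, data);
+}
+
+/* Illustrative helper, called once by the application at startup. */
+static void
+install_bearer_hook(void)
+{
+    previous_hook = PQgetAuthDataHook();
+    PQsetAuthDataHook(bearer_hook);
+}
+</programlisting>
+         </para>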
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </sect3>
+ </sect2>
+
+ <sect2 id="libpq-oauth-debugging">
+ <title>Debugging and Developer Settings</title>
+
+ <para>
+ A "dangerous debugging mode" may be enabled by setting the environment
+ variable <envar>PGOAUTHDEBUG=UNSAFE</envar>. This functionality is provided
+ for ease of local development and testing only. It does several things that
+ you will not want a production system to do:
+
+ <itemizedlist spacing="compact">
+ <listitem>
+ <para>
+ permits the use of unencrypted HTTP during the OAuth provider exchange
+ </para>
+ </listitem>
+ <listitem>
+ <para>
+ allows the system's trusted CA list to be completely replaced using the
+ <envar>PGOAUTHCAFILE</envar> environment variable
+ </para>
+ </listitem>
+ <listitem>
+ <para>
+ prints HTTP traffic (containing several critical secrets) to standard
+ error during the OAuth flow
+ </para>
+ </listitem>
+ <listitem>
+ <para>
+ permits the use of zero-second retry intervals, which can cause the
+ client to busy-loop and pointlessly consume CPU
+ </para>
+ </listitem>
+ </itemizedlist>
+ </para>
+ <warning>
+ <para>
+ Do not share the output of the OAuth flow traffic with third parties. It
+ contains secrets that can be used to attack your clients and servers.
+ </para>
+ </warning>
+ </sect2>
+ </sect1>
+
<sect1 id="libpq-threading">
<title>Behavior in Threaded Programs</title>
<application>libpq</application> source code for a way to do cooperative
locking between <application>libpq</application> and your application.
</para>
+
+ <para>
+    Similarly, if you are using <productname>Curl</productname> inside your
+    application, <emphasis>and</emphasis> you do not already
+    <ulink url="https://curl.se/libcurl/c/curl_global_init.html">initialize
+    libcurl globally</ulink> before starting new threads, you will need to
+    cooperatively lock (again via <function>PQregisterThreadLock</function>)
+    around any code that may initialize libcurl. This restriction is lifted
+    for more recent versions of <productname>Curl</productname> that are
+    built to support thread-safe initialization; those builds can be
+    identified by the advertisement of a <literal>threadsafe</literal>
+    feature in their version metadata.
+ </para>
</sect1>
--- /dev/null
+<!-- doc/src/sgml/oauth-validators.sgml -->
+
+<chapter id="oauth-validators">
+ <title>OAuth Validator Modules</title>
+ <indexterm zone="oauth-validators">
+ <primary>OAuth Validators</primary>
+ </indexterm>
+ <para>
+ <productname>PostgreSQL</productname> provides infrastructure for creating
+ custom modules to perform server-side validation of OAuth bearer tokens.
+ Because OAuth implementations vary so wildly, and bearer token validation is
+ heavily dependent on the issuing party, the server cannot check the token
+ itself; validator modules provide the integration layer between the server
+ and the OAuth provider in use.
+ </para>
+ <para>
+ OAuth validator modules must at least consist of an initialization function
+ (see <xref linkend="oauth-validator-init"/>) and the required callback for
+ performing validation (see <xref linkend="oauth-validator-callback-validate"/>).
+ </para>
+ <warning>
+ <para>
+ Since a misbehaving validator might let unauthorized users into the database,
+ correct implementation is crucial for server safety. See
+ <xref linkend="oauth-validator-design"/> for design considerations.
+ </para>
+ </warning>
+
+ <sect1 id="oauth-validator-design">
+ <title>Safely Designing a Validator Module</title>
+ <warning>
+ <para>
+ Read and understand the entirety of this section before implementing a
+ validator module. A malfunctioning validator is potentially worse than no
+ authentication at all, both because of the false sense of security it
+ provides, and because it may contribute to attacks against other pieces of
+ an OAuth ecosystem.
+ </para>
+ </warning>
+
+ <sect2 id="oauth-validator-design-responsibilities">
+ <title>Validator Responsibilities</title>
+ <para>
+ Although different modules may take very different approaches to token
+ validation, implementations generally need to perform three separate
+ actions:
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term>Validate the Token</term>
+ <listitem>
+ <para>
+ The validator must first ensure that the presented token is in fact a
+ valid Bearer token for use in client authentication. The correct way to
+ do this depends on the provider, but it generally involves either
+ cryptographic operations to prove that the token was created by a trusted
+ party (offline validation), or the presentation of the token to that
+ trusted party so that it can perform validation for you (online
+ validation).
+ </para>
+ <para>
+ Online validation, usually implemented via
+ <ulink url="https://datatracker.ietf.org/doc/html/rfc7662">OAuth Token
+ Introspection</ulink>, requires fewer steps of a validator module and
+ allows central revocation of a token in the event that it is stolen
+ or misissued. However, it does require the module to make at least one
+ network call per authentication attempt (all of which must complete
+ within the configured <xref linkend="guc-authentication-timeout"/>).
+ Additionally, your provider may not provide introspection endpoints for
+ use by external resource servers.
+ </para>
+ <para>
+ Offline validation is much more involved, typically requiring a validator
+ to maintain a list of trusted signing keys for a provider and then
+ check the token's cryptographic signature along with its contents.
+ Implementations must follow the provider's instructions to the letter,
+ including any verification of issuer ("where is this token from?"),
+ audience ("who is this token for?"), and validity period ("when can this
+ token be used?"). Since there is no communication between the module and
+ the provider, tokens cannot be centrally revoked using this method;
+ offline validator implementations may wish to place restrictions on the
+ maximum length of a token's validity period.
+ </para>
+ <para>
+ If the token cannot be validated, the module should immediately fail.
+ Further authentication/authorization is pointless if the bearer token
+ wasn't issued by a trusted party.
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Authorize the Client</term>
+ <listitem>
+ <para>
+ Next the validator must ensure that the end user has given the client
+ permission to access the server on their behalf. This generally involves
+ checking the scopes that have been assigned to the token, to make sure
+ that they cover database access for the current HBA parameters.
+ </para>
+ <para>
+ The purpose of this step is to prevent an OAuth client from obtaining a
+ token under false pretenses. If the validator requires all tokens to
+ carry scopes that cover database access, the provider should then loudly
+ prompt the user to grant that access during the flow. This gives them the
+ opportunity to reject the request if the client isn't supposed to be
+ using their credentials to connect to databases.
+ </para>
+ <para>
+ While it is possible to establish client authorization without explicit
+ scopes by using out-of-band knowledge of the deployed architecture, doing
+ so removes the user from the loop, which prevents them from catching
+ deployment mistakes and allows any such mistakes to be exploited
+ silently. Access to the database must be tightly restricted to only
+ trusted clients
+ <footnote>
+ <para>
+ That is, "trusted" in the sense that the OAuth client and the
+ <productname>PostgreSQL</productname> server are controlled by the same
+ entity. Notably, the Device Authorization client flow supported by
+ libpq does not usually meet this bar, since it's designed for use by
+ public/untrusted clients.
+ </para>
+ </footnote>
+ if users are not prompted for additional scopes.
+ </para>
+ <para>
+ Even if authorization fails, a module may choose to continue to pull
+ authentication information from the token for use in auditing and
+ debugging.
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Authenticate the End User</term>
+ <listitem>
+ <para>
+ Finally, the validator should determine a user identifier for the token,
+ either by asking the provider for this information or by extracting it
+ from the token itself, and return that identifier to the server (which
+ will then make a final authorization decision using the HBA
+ configuration). This identifier will be available within the session via
+ <link linkend="functions-info-session-table"><function>system_user</function></link>
+ and recorded in the server logs if <xref linkend="guc-log-connections"/>
+ is enabled.
+ </para>
+ <para>
+ Different providers may record a variety of different authentication
+ information for an end user, typically referred to as
+ <emphasis>claims</emphasis>. Providers usually document which of these
+ claims are trustworthy enough to use for authorization decisions and
+ which are not. (For instance, it would probably not be wise to use an
+ end user's full name as the identifier for authentication, since many
+ providers allow users to change their display names arbitrarily.)
+ Ultimately, the choice of which claim (or combination of claims) to use
+ comes down to the provider implementation and application requirements.
+ </para>
+ <para>
+ Note that anonymous/pseudonymous login is possible as well, by enabling
+ usermap delegation; see
+ <xref linkend="oauth-validator-design-usermap-delegation"/>.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </sect2>
+
+ <sect2 id="oauth-validator-design-guidelines">
+ <title>General Coding Guidelines</title>
+ <para>
+ Developers should keep the following in mind when implementing token
+ validation:
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term>Token Confidentiality</term>
+ <listitem>
+ <para>
+ Modules should not write tokens, or pieces of tokens, into the server
+ log. This is true even if the module considers the token invalid; an
+ attacker who confuses a client into communicating with the wrong provider
+ should not be able to retrieve that (otherwise valid) token from the
+ disk.
+ </para>
+ <para>
+ Implementations that send tokens over the network (for example, to
+ perform online token validation with a provider) must authenticate the
+ peer and ensure that strong transport security is in use.
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Logging</term>
+ <listitem>
+ <para>
+ Modules may use the same <link linkend="error-message-reporting">logging
+ facilities</link> as standard extensions; however, the rules for emitting
+ log entries to the client are subtly different during the authentication
+ phase of the connection. Generally speaking, modules should log
+ verification problems at the <symbol>COMMERROR</symbol> level and return
+ normally, instead of using <symbol>ERROR</symbol>/<symbol>FATAL</symbol>
+ to unwind the stack, to avoid leaking information to unauthenticated
+ clients.
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Interruptibility</term>
+ <listitem>
+ <para>
+ Modules must remain interruptible by signals so that the server can
+ correctly handle authentication timeouts and shutdown signals from
+ <application>pg_ctl</application>. For example, a module receiving
+ <symbol>EINTR</symbol>/<symbol>EAGAIN</symbol> from a blocking call
+ should call <function>CHECK_FOR_INTERRUPTS()</function> before retrying.
+ The same should be done during any long-running loops. Failure to follow
+ this guidance may result in unresponsive backend sessions.
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Testing</term>
+ <listitem>
+ <para>
+ The breadth of testing an OAuth system is well beyond the scope of this
+ documentation, but at minimum, negative testing should be considered
+ mandatory. It's trivial to design a module that lets authorized users in;
+ the whole point of the system is to keep unauthorized users out.
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Documentation</term>
+ <listitem>
+ <para>
+ Validator implementations should document the contents and format of the
+ authenticated ID that is reported to the server for each end user, since
+        DBAs may need to use this information to construct
+        <filename>pg_ident.conf</filename> maps. (For
+ instance, is it an email address? an organizational ID number? a UUID?)
+ They should also document whether or not it is safe to use the module in
+ <symbol>delegate_ident_mapping=1</symbol> mode, and what additional
+ configuration is required in order to do so.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </sect2>
+
+ <sect2 id="oauth-validator-design-usermap-delegation">
+ <title>Authorizing Users (Usermap Delegation)</title>
+ <para>
+ The standard deliverable of a validation module is the user identifier,
+ which the server will then compare to any configured
+ <link linkend="auth-username-maps"><filename>pg_ident.conf</filename>
+ mappings</link> and determine whether the end user is authorized to connect.
+ However, OAuth is itself an authorization framework, and tokens may carry
+ information about user privileges. For example, a token may be associated
+ with the organizational groups that a user belongs to, or list the roles
+ that a user may assume, and duplicating that knowledge into local usermaps
+ for every server may not be desirable.
+ </para>
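+ <para>
+  As an example of the standard approach, if a validator reports a stable
+  subject identifier as the user identifier, an HBA entry and usermap tying
+  it to a database role might look like the following (all names, URLs, and
+  identifiers are placeholders):
+<programlisting>
+# pg_hba.conf
+# TYPE  DATABASE  USER  ADDRESS   METHOD
+host    all       all   samenet   oauth  issuer="https://example.com" scope="openid" map=oauthmap
+
+# pg_ident.conf
+# MAPNAME   SYSTEM-USERNAME                        PG-USERNAME
+oauthmap    123e4567-e89b-12d3-a456-426614174000   alice
+</programlisting>
+ </para>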
+ <para>
+ To bypass username mapping entirely, and have the validator module assume
+ the additional responsibility of authorizing user connections, the HBA may
+ be configured with <xref linkend="auth-oauth-delegate-ident-mapping"/>.
+ The module may then use token scopes or an equivalent method to decide
+ whether the user is allowed to connect under their desired role. The user
+ identifier will still be recorded by the server, but it plays no part in
+ determining whether to continue the connection.
+ </para>
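+ <para>
+  A corresponding HBA entry that delegates the decision to the validator
+  might look like this (again, the issuer and scope are placeholders); note
+  that <literal>map</literal> cannot be combined with delegation:
+<programlisting>
+# TYPE  DATABASE  USER  ADDRESS   METHOD
+host    all       all   samenet   oauth  issuer="https://example.com" scope="openid" delegate_ident_mapping=1
+</programlisting>
+ </para>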
+ <para>
+ Using this scheme, authentication itself is optional. As long as the module
+ reports that the connection is authorized, login will continue even if there
+ is no recorded user identifier at all. This makes it possible to implement
+ anonymous or pseudonymous access to the database, where the third-party
+ provider performs all necessary authentication but does not provide any
+ user-identifying information to the server. (Some providers may create an
+ anonymized ID number that can be recorded instead, for later auditing.)
+ </para>
+ <para>
+ Usermap delegation provides the most architectural flexibility, but it turns
+ the validator module into a single point of failure for connection
+ authorization. Use with caution.
+ </para>
+ </sect2>
+ </sect1>
+
+ <sect1 id="oauth-validator-init">
+ <title>Initialization Functions</title>
+ <indexterm zone="oauth-validator-init">
+ <primary>_PG_oauth_validator_module_init</primary>
+ </indexterm>
+ <para>
+ OAuth validator modules are dynamically loaded from the shared
+ libraries listed in <xref linkend="guc-oauth-validator-libraries"/>.
+ Modules are loaded on demand when requested by a login in progress.
+ The normal library search path is used to locate the library. To
+ provide the validator callbacks and to indicate that the library is an
+ OAuth validator module, the library must define a function named
+ <function>_PG_oauth_validator_module_init</function>. The
+ return value of the function must be a pointer to a struct of type
+ <structname>OAuthValidatorCallbacks</structname>, which contains a magic
+ number and pointers to the module's token validation functions. The returned
+ pointer must be of server lifetime, which is typically achieved by defining
+ it as a <literal>static const</literal> variable in global scope.
+<programlisting>
+typedef struct OAuthValidatorCallbacks
+{
+ uint32 magic; /* must be set to PG_OAUTH_VALIDATOR_MAGIC */
+
+ ValidatorStartupCB startup_cb;
+ ValidatorShutdownCB shutdown_cb;
+ ValidatorValidateCB validate_cb;
+} OAuthValidatorCallbacks;
+
+typedef const OAuthValidatorCallbacks *(*OAuthValidatorModuleInit) (void);
+</programlisting>
+
+ Only the <function>validate_cb</function> callback is required; the others
+ are optional.
+ </para>
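+ <para>
+  For illustration, a minimal module that provides only the required
+  callback could be set up as follows. The validation function itself is
+  only sketched here; see
+  <xref linkend="oauth-validator-callback-validate"/> for what it must do.
+<programlisting>
+#include "postgres.h"
+
+#include "fmgr.h"
+#include "libpq/oauth.h"
+
+PG_MODULE_MAGIC;
+
+static bool hypothetical_validate(const ValidatorModuleState *state,
+                                  const char *token, const char *role,
+                                  ValidatorModuleResult *result);
+
+static const OAuthValidatorCallbacks callbacks = {
+    .magic = PG_OAUTH_VALIDATOR_MAGIC,
+    .validate_cb = hypothetical_validate,
+};
+
+const OAuthValidatorCallbacks *
+_PG_oauth_validator_module_init(void)
+{
+    return &amp;callbacks;
+}
+</programlisting>
+ </para>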
+ </sect1>
+
+ <sect1 id="oauth-validator-callbacks">
+ <title>OAuth Validator Callbacks</title>
+ <para>
+ OAuth validator modules implement their functionality by defining a set of
+ callbacks. The server will call them as required to process the
+ authentication request from the user.
+ </para>
+
+ <sect2 id="oauth-validator-callback-startup">
+ <title>Startup Callback</title>
+ <para>
+ The <function>startup_cb</function> callback is executed directly after
+ loading the module. This callback can be used to set up local state and
+ perform additional initialization if required. If the validator module
+ has state, it can use <structfield>state->private_data</structfield> to
+ store it.
+
+<programlisting>
+typedef void (*ValidatorStartupCB) (ValidatorModuleState *state);
+</programlisting>
+ </para>
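+ <para>
+  As a sketch, a module could allocate its private state here (the struct
+  and its contents are hypothetical) and release it again in the shutdown
+  callback:
+<programlisting>
+struct hypothetical_state
+{
+    int         connection_count;      /* hypothetical module-private data */
+};
+
+static void
+hypothetical_startup(ValidatorModuleState *state)
+{
+    /* Allocate module-private state; the shutdown callback frees it. */
+    struct hypothetical_state *priv = palloc0(sizeof(*priv));
+
+    state->private_data = priv;
+}
+</programlisting>
+ </para>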
+ </sect2>
+
+ <sect2 id="oauth-validator-callback-validate">
+ <title>Validate Callback</title>
+ <para>
+ The <function>validate_cb</function> callback is executed during the OAuth
+ exchange when a user attempts to authenticate using OAuth. Any state set in
+ previous calls will be available in <structfield>state->private_data</structfield>.
+
+<programlisting>
+typedef bool (*ValidatorValidateCB) (const ValidatorModuleState *state,
+ const char *token, const char *role,
+ ValidatorModuleResult *result);
+</programlisting>
+
+ <replaceable>token</replaceable> will contain the bearer token to validate.
+ <productname>PostgreSQL</productname> has ensured that the token is
+ well-formed syntactically, but no
+ other validation has been performed. <replaceable>role</replaceable> will
+ contain the role the user has requested to log in as. The callback must
+ set output parameters in the <literal>result</literal> struct, which is
+ defined as follows:
+
+<programlisting>
+typedef struct ValidatorModuleResult
+{
+ bool authorized;
+ char *authn_id;
+} ValidatorModuleResult;
+</programlisting>
+
+ The connection will only proceed if the module sets
+ <structfield>result->authorized</structfield> to <literal>true</literal>. To
+ authenticate the user, the authenticated user name (as determined using the
+ token) shall be palloc'd and returned in the <structfield>result->authn_id</structfield>
+ field. Alternatively, <structfield>result->authn_id</structfield> may be set to
+ NULL if the token is valid but the associated user identity cannot be
+ determined.
+ </para>
+ <para>
+ A validator may return <literal>false</literal> to signal an internal error,
+ in which case any result parameters are ignored and the connection fails.
+ Otherwise the validator should return <literal>true</literal> to indicate
+ that it has processed the token and made an authorization decision.
+ </para>
+ <para>
+ The behavior after <function>validate_cb</function> returns depends on the
+ specific HBA setup. Normally, the <structfield>result->authn_id</structfield> user
+ name must exactly match the role that the user is logging in as. (This
+ behavior may be modified with a usermap.) But when authenticating against
+ an HBA rule with <literal>delegate_ident_mapping</literal> turned on,
+ <productname>PostgreSQL</productname> will not perform any checks on the value of
+ <structfield>result->authn_id</structfield> at all; in this case it is up to the
+ validator to ensure that the token carries enough privileges for the user to
+ log in under the indicated <replaceable>role</replaceable>.
+ </para>
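+ <para>
+  The following sketch shows the expected shape of a validator, using a
+  hypothetical <function>verify_token()</function> helper that checks the
+  token (for instance against the provider's keys or its introspection
+  endpoint) and reports the end user's identity:
+<programlisting>
+static bool
+hypothetical_validate(const ValidatorModuleState *state,
+                      const char *token, const char *role,
+                      ValidatorModuleResult *result)
+{
+    char       *subject = NULL;
+
+    /* Hypothetical helper; returns false only on internal failure. */
+    if (!verify_token(token, &amp;subject))
+        return false;           /* the connection will be aborted */
+
+    if (subject == NULL)
+    {
+        /* The token was inspected but is not acceptable. */
+        result->authorized = false;
+        result->authn_id = NULL;
+        return true;
+    }
+
+    result->authorized = true;
+    result->authn_id = pstrdup(subject);
+
+    return true;
+}
+</programlisting>
+ </para>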
+ </sect2>
+
+ <sect2 id="oauth-validator-callback-shutdown">
+ <title>Shutdown Callback</title>
+ <para>
+ The <function>shutdown_cb</function> callback is executed when the backend
+ process associated with the connection exits. If the validator module has
+ any allocated state, this callback should free it to avoid resource leaks.
+<programlisting>
+typedef void (*ValidatorShutdownCB) (ValidatorModuleState *state);
+</programlisting>
+ </para>
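+ <para>
+  Continuing the hypothetical module above, a shutdown callback could simply
+  release the private state:
+<programlisting>
+static void
+hypothetical_shutdown(ValidatorModuleState *state)
+{
+    if (state->private_data)
+        pfree(state->private_data);
+}
+</programlisting>
+ </para>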
+ </sect2>
+
+ </sect1>
+</chapter>
&logicaldecoding;
&replication-origins;
&archive-modules;
+ &oauth-validators;
</part>
<para>
<firstterm>SASL</firstterm> is a framework for authentication in connection-oriented
- protocols. At the moment, <productname>PostgreSQL</productname> implements two SASL
- authentication mechanisms, SCRAM-SHA-256 and SCRAM-SHA-256-PLUS. More
- might be added in the future. The below steps illustrate how SASL
- authentication is performed in general, while the next subsection gives
- more details on SCRAM-SHA-256 and SCRAM-SHA-256-PLUS.
+ protocols. At the moment, <productname>PostgreSQL</productname> implements three
+ SASL authentication mechanisms: SCRAM-SHA-256, SCRAM-SHA-256-PLUS, and
+ OAUTHBEARER. More might be added in the future. The below steps illustrate how SASL
+ authentication is performed in general, while the next subsections give
+ more details on particular mechanisms.
</para>
<procedure>
<step id="sasl-auth-end">
<para>
Finally, when the authentication exchange is completed successfully, the
- server sends an AuthenticationSASLFinal message, followed
+ server sends an optional AuthenticationSASLFinal message, followed
immediately by an AuthenticationOk message. The AuthenticationSASLFinal
contains additional server-to-client data, whose content is particular to the
selected authentication mechanism. If the authentication mechanism doesn't
<title>SCRAM-SHA-256 Authentication</title>
<para>
- The implemented SASL mechanisms at the moment
- are <literal>SCRAM-SHA-256</literal> and its variant with channel
- binding <literal>SCRAM-SHA-256-PLUS</literal>. They are described in
+ <literal>SCRAM-SHA-256</literal>, and its variant with channel
+ binding <literal>SCRAM-SHA-256-PLUS</literal>, are password-based
+ authentication mechanisms. They are described in
detail in <ulink url="https://datatracker.ietf.org/doc/html/rfc7677">RFC 7677</ulink>
and <ulink url="https://datatracker.ietf.org/doc/html/rfc5802">RFC 5802</ulink>.
</para>
</step>
</procedure>
</sect2>
+
+ <sect2 id="sasl-oauthbearer">
+ <title>OAUTHBEARER Authentication</title>
+
+ <para>
+ <literal>OAUTHBEARER</literal> is a token-based mechanism for federated
+ authentication. It is described in detail in
+ <ulink url="https://datatracker.ietf.org/doc/html/rfc7628">RFC 7628</ulink>.
+ </para>
+
+ <para>
+ A typical exchange differs depending on whether or not the client already
+ has a bearer token cached for the current user. If it does not, the exchange
+ will take place over two connections: the first "discovery" connection to
+ obtain OAuth metadata from the server, and the second connection to send
+ the token after the client has obtained it. (libpq does not currently
+ implement a caching method as part of its built-in flow, so it uses the
+ two-connection exchange.)
+ </para>
+
+ <para>
+ This mechanism is client-initiated, like SCRAM. The client initial response
+ consists of the standard "GS2" header used by SCRAM, followed by a list of
+ <literal>key=value</literal> pairs. The only key currently supported by
+ the server is <literal>auth</literal>, which contains the bearer token.
+ <literal>OAUTHBEARER</literal> additionally specifies three optional
+ components of the client initial response: the <literal>authzid</literal>
+ of the GS2 header, which the server rejects as unsupported, and the
+ <structfield>host</structfield> and <structfield>port</structfield> keys,
+ which the server currently ignores.
+ </para>
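+ <para>
+  For example, a client initial response carrying a (made-up, shortened)
+  bearer token consists of the following bytes, where <literal>^A</literal>
+  stands for the <literal>0x01</literal> key/value separator:
+<programlisting>
+n,,^Aauth=Bearer abcd1234...^A^A
+</programlisting>
+ </para>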
+
+ <para>
+ <literal>OAUTHBEARER</literal> does not support channel binding, and there
+ is no "OAUTHBEARER-PLUS" mechanism. This mechanism does not make use of
+ server data during a successful authentication, so the
+ AuthenticationSASLFinal message is not used in the exchange.
+ </para>
+
+ <procedure>
+ <title>Example</title>
+ <step>
+ <para>
+ During the first exchange, the server sends an AuthenticationSASL message
+ with the <literal>OAUTHBEARER</literal> mechanism advertised.
+ </para>
+ </step>
+
+ <step>
+ <para>
+ The client responds by sending a SASLInitialResponse message which
+ indicates the <literal>OAUTHBEARER</literal> mechanism. Assuming the
+ client does not already have a valid bearer token for the current user,
+ the <structfield>auth</structfield> field is empty, indicating a discovery
+ connection.
+ </para>
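+ <para>
+  In terms of the notation above, such a discovery message is simply
+  <literal>n,,^Aauth=^A^A</literal>.
+ </para>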
+ </step>
+
+ <step>
+ <para>
+ The server sends an AuthenticationSASLContinue message containing an
+ error <literal>status</literal>, alongside a well-known URI and the
+ scopes that the client should use to conduct an OAuth flow.
+ </para>
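+ <para>
+  The additional data is a JSON document; for instance (the URI and scope
+  shown here are placeholders):
+<programlisting>
+{ "status": "invalid_token", "openid-configuration": "https://example.com/.well-known/openid-configuration", "scope": "openid" }
+</programlisting>
+ </para>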
+ </step>
+
+ <step>
+ <para>
+ The client sends a SASLResponse message containing the required dummy
+ response (a single <literal>0x01</literal> byte) to finish its half of
+ the discovery exchange.
+ </para>
+ </step>
+
+ <step>
+ <para>
+ The server sends an ErrorMessage to fail the first exchange.
+ </para>
+ <para>
+ At this point, the client conducts one of many possible OAuth flows to
+ obtain a bearer token, using any metadata that it has been configured with
+ in addition to that provided by the server. (This description is left
+ deliberately vague; <literal>OAUTHBEARER</literal> does not specify or
+ mandate any particular method for obtaining a token.)
+ </para>
+ <para>
+ Once it has a token, the client reconnects to the server for the final
+ exchange:
+ </para>
+ </step>
+
+ <step>
+ <para>
+ The server once again sends an AuthenticationSASL message with the
+ <literal>OAUTHBEARER</literal> mechanism advertised.
+ </para>
+ </step>
+
+ <step>
+ <para>
+ The client responds by sending a SASLInitialResponse message, but this
+ time the <structfield>auth</structfield> field in the message contains the
+ bearer token that was obtained during the client flow.
+ </para>
+ </step>
+
+ <step>
+ <para>
+ The server validates the token according to the instructions of the
+ token provider. If the client is authorized to connect, the server sends
+ an AuthenticationOk message to end the SASL exchange.
+ </para>
+ </step>
+ </procedure>
+ </sect2>
</sect1>
<sect1 id="protocol-replication">
</para>
</listitem>
</varlistentry>
+
+ <varlistentry>
+ <term><literal>oauth</literal></term>
+ <listitem>
+ <para>
+ Runs the test suite under <filename>src/test/modules/oauth_validator</filename>.
+ This opens TCP/IP listen sockets for a test server running HTTPS.
+ </para>
+ </listitem>
+ </varlistentry>
</variablelist>
Tests for features that are not supported by the current build
+###############################################################
+# Library: libcurl
+###############################################################
+
+libcurlopt = get_option('libcurl')
+if not libcurlopt.disabled()
+ # Check for libcurl 7.61.0 or higher (corresponding to RHEL8 and the ability
+ # to explicitly set TLS 1.3 ciphersuites).
+ libcurl = dependency('libcurl', version: '>= 7.61.0', required: libcurlopt)
+ if libcurl.found()
+ cdata.set('USE_LIBCURL', 1)
+
+ # Check to see whether the current platform supports thread-safe Curl
+ # initialization.
+ libcurl_threadsafe_init = false
+
+ if not meson.is_cross_build()
+ r = cc.run('''
+ #include <curl/curl.h>
+
+ int main(void)
+ {
+ curl_version_info_data *info;
+
+ if (curl_global_init(CURL_GLOBAL_ALL))
+ return -1;
+
+ info = curl_version_info(CURLVERSION_NOW);
+ #ifdef CURL_VERSION_THREADSAFE
+ if (info->features & CURL_VERSION_THREADSAFE)
+ return 0;
+ #endif
+
+ return 1;
+ }''',
+ name: 'test for curl_global_init thread safety',
+ dependencies: libcurl,
+ )
+
+ assert(r.compiled())
+ if r.returncode() == 0
+ libcurl_threadsafe_init = true
+ message('curl_global_init is thread-safe')
+ elif r.returncode() == 1
+ message('curl_global_init is not thread-safe')
+ else
+ message('curl_global_init failed; assuming not thread-safe')
+ endif
+ endif
+
+ if libcurl_threadsafe_init
+ cdata.set('HAVE_THREADSAFE_CURL_GLOBAL_INIT', 1)
+ endif
+
+ # Warn if a thread-friendly DNS resolver isn't built.
+ libcurl_async_dns = false
+
+ if not meson.is_cross_build()
+ r = cc.run('''
+ #include <curl/curl.h>
+
+ int main(void)
+ {
+ curl_version_info_data *info;
+
+ if (curl_global_init(CURL_GLOBAL_ALL))
+ return -1;
+
+ info = curl_version_info(CURLVERSION_NOW);
+ return (info->features & CURL_VERSION_ASYNCHDNS) ? 0 : 1;
+ }''',
+ name: 'test for curl support for asynchronous DNS',
+ dependencies: libcurl,
+ )
+
+ assert(r.compiled())
+ if r.returncode() == 0
+ libcurl_async_dns = true
+ endif
+ endif
+
+ if not libcurl_async_dns
+ warning('''
+*** The installed version of libcurl does not support asynchronous DNS
+*** lookups. Connection timeouts will not be honored during DNS resolution,
+*** which may lead to hangs in client programs.''')
+ endif
+ endif
+
+else
+ libcurl = not_found_dep
+endif
+
+
+
###############################################################
# Library: libxml
###############################################################
gssapi,
ldap_r,
+ # XXX libcurl must link after libgssapi_krb5 on FreeBSD to avoid segfaults
+ # during gss_acquire_cred(). This is possibly related to Curl's Heimdal
+ # dependency on that platform?
+ libcurl,
libintl,
ssl,
]
'gss': gssapi,
'icu': icu,
'ldap': ldap,
+ 'libcurl': libcurl,
'libxml': libxml,
'libxslt': libxslt,
'llvm': llvm,
option('ldap', type: 'feature', value: 'auto',
description: 'LDAP support')
+option('libcurl', type : 'feature', value: 'auto',
+ description: 'libcurl support')
+
option('libedit_preferred', type: 'boolean', value: false,
description: 'Prefer BSD Libedit over GNU Readline')
with_gssapi = @with_gssapi@
with_krb_srvnam = @with_krb_srvnam@
with_ldap = @with_ldap@
+with_libcurl = @with_libcurl@
with_libxml = @with_libxml@
with_libxslt = @with_libxslt@
with_llvm = @with_llvm@
# be-fsstubs is here for historical reasons, probably belongs elsewhere
OBJS = \
+ auth-oauth.o \
auth-sasl.o \
auth-scram.o \
auth.o \
--- /dev/null
+/*-------------------------------------------------------------------------
+ *
+ * auth-oauth.c
+ * Server-side implementation of the SASL OAUTHBEARER mechanism.
+ *
+ * See the following RFC for more details:
+ * - RFC 7628: https://datatracker.ietf.org/doc/html/rfc7628
+ *
+ * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/backend/libpq/auth-oauth.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include <unistd.h>
+#include <fcntl.h>
+
+#include "common/oauth-common.h"
+#include "fmgr.h"
+#include "lib/stringinfo.h"
+#include "libpq/auth.h"
+#include "libpq/hba.h"
+#include "libpq/oauth.h"
+#include "libpq/sasl.h"
+#include "storage/fd.h"
+#include "storage/ipc.h"
+#include "utils/json.h"
+#include "utils/varlena.h"
+
+/* GUC */
+char *oauth_validator_libraries_string = NULL;
+
+static void oauth_get_mechanisms(Port *port, StringInfo buf);
+static void *oauth_init(Port *port, const char *selected_mech, const char *shadow_pass);
+static int oauth_exchange(void *opaq, const char *input, int inputlen,
+ char **output, int *outputlen, const char **logdetail);
+
+static void load_validator_library(const char *libname);
+static void shutdown_validator_library(void *arg);
+
+static ValidatorModuleState *validator_module_state;
+static const OAuthValidatorCallbacks *ValidatorCallbacks;
+
+/* Mechanism declaration */
+const pg_be_sasl_mech pg_be_oauth_mech = {
+ .get_mechanisms = oauth_get_mechanisms,
+ .init = oauth_init,
+ .exchange = oauth_exchange,
+
+ .max_message_length = PG_MAX_AUTH_TOKEN_LENGTH,
+};
+
+/* Valid states for the oauth_exchange() machine. */
+enum oauth_state
+{
+ OAUTH_STATE_INIT = 0,
+ OAUTH_STATE_ERROR,
+ OAUTH_STATE_FINISHED,
+};
+
+/* Mechanism callback state. */
+struct oauth_ctx
+{
+ enum oauth_state state;
+ Port *port;
+ const char *issuer;
+ const char *scope;
+};
+
+static char *sanitize_char(char c);
+static char *parse_kvpairs_for_auth(char **input);
+static void generate_error_response(struct oauth_ctx *ctx, char **output, int *outputlen);
+static bool validate(Port *port, const char *auth);
+
+/* Constants seen in an OAUTHBEARER client initial response. */
+#define KVSEP 0x01 /* separator byte for key/value pairs */
+#define AUTH_KEY "auth" /* key containing the Authorization header */
+#define BEARER_SCHEME "Bearer " /* required header scheme (case-insensitive!) */
+
+/*
+ * Retrieves the OAUTHBEARER mechanism list (currently a single item).
+ *
+ * For a full description of the API, see libpq/sasl.h.
+ */
+static void
+oauth_get_mechanisms(Port *port, StringInfo buf)
+{
+ /* Only OAUTHBEARER is supported. */
+ appendStringInfoString(buf, OAUTHBEARER_NAME);
+ appendStringInfoChar(buf, '\0');
+}
+
+/*
+ * Initializes mechanism state and loads the configured validator module.
+ *
+ * For a full description of the API, see libpq/sasl.h.
+ */
+static void *
+oauth_init(Port *port, const char *selected_mech, const char *shadow_pass)
+{
+ struct oauth_ctx *ctx;
+
+ if (strcmp(selected_mech, OAUTHBEARER_NAME) != 0)
+ ereport(ERROR,
+ errcode(ERRCODE_PROTOCOL_VIOLATION),
+ errmsg("client selected an invalid SASL authentication mechanism"));
+
+ ctx = palloc0(sizeof(*ctx));
+
+ ctx->state = OAUTH_STATE_INIT;
+ ctx->port = port;
+
+ Assert(port->hba);
+ ctx->issuer = port->hba->oauth_issuer;
+ ctx->scope = port->hba->oauth_scope;
+
+ load_validator_library(port->hba->oauth_validator);
+
+ return ctx;
+}
+
+/*
+ * Implements the OAUTHBEARER SASL exchange (RFC 7628, Sec. 3.2). This pulls
+ * apart the client initial response and validates the Bearer token. It also
+ * handles the dummy error response for a failed handshake, as described in
+ * Sec. 3.2.3.
+ *
+ * For a full description of the API, see libpq/sasl.h.
+ */
+static int
+oauth_exchange(void *opaq, const char *input, int inputlen,
+ char **output, int *outputlen, const char **logdetail)
+{
+ char *input_copy;
+ char *p;
+ char cbind_flag;
+ char *auth;
+ int status;
+
+ struct oauth_ctx *ctx = opaq;
+
+ *output = NULL;
+ *outputlen = -1;
+
+ /*
+ * If the client didn't include an "Initial Client Response" in the
+ * SASLInitialResponse message, send an empty challenge, to which the
+ * client will respond with the same data that usually comes in the
+ * Initial Client Response.
+ */
+ if (input == NULL)
+ {
+ Assert(ctx->state == OAUTH_STATE_INIT);
+
+ *output = pstrdup("");
+ *outputlen = 0;
+ return PG_SASL_EXCHANGE_CONTINUE;
+ }
+
+ /*
+ * Check that the input length agrees with the string length of the input.
+ */
+ if (inputlen == 0)
+ ereport(ERROR,
+ errcode(ERRCODE_PROTOCOL_VIOLATION),
+ errmsg("malformed OAUTHBEARER message"),
+ errdetail("The message is empty."));
+ if (inputlen != strlen(input))
+ ereport(ERROR,
+ errcode(ERRCODE_PROTOCOL_VIOLATION),
+ errmsg("malformed OAUTHBEARER message"),
+ errdetail("Message length does not match input length."));
+
+ switch (ctx->state)
+ {
+ case OAUTH_STATE_INIT:
+ /* Handle this case below. */
+ break;
+
+ case OAUTH_STATE_ERROR:
+
+ /*
+ * Only one response is valid for the client during authentication
+ * failure: a single kvsep.
+ */
+ if (inputlen != 1 || *input != KVSEP)
+ ereport(ERROR,
+ errcode(ERRCODE_PROTOCOL_VIOLATION),
+ errmsg("malformed OAUTHBEARER message"),
+ errdetail("Client did not send a kvsep response."));
+
+ /* The (failed) handshake is now complete. */
+ ctx->state = OAUTH_STATE_FINISHED;
+ return PG_SASL_EXCHANGE_FAILURE;
+
+ default:
+ elog(ERROR, "invalid OAUTHBEARER exchange state");
+ return PG_SASL_EXCHANGE_FAILURE;
+ }
+
+ /* Handle the client's initial message. */
+ p = input_copy = pstrdup(input);
+
+ /*
+ * OAUTHBEARER does not currently define a channel binding (so there is no
+ * OAUTHBEARER-PLUS, and we do not accept a 'p' specifier). We accept a
+ * 'y' specifier purely for the remote chance that a future specification
+ * could define one; then future clients can still interoperate with this
+ * server implementation. 'n' is the expected case.
+ */
+ cbind_flag = *p;
+ switch (cbind_flag)
+ {
+ case 'p':
+ ereport(ERROR,
+ errcode(ERRCODE_PROTOCOL_VIOLATION),
+ errmsg("malformed OAUTHBEARER message"),
+ errdetail("The server does not support channel binding for OAuth, but the client message includes channel binding data."));
+ break;
+
+ case 'y': /* fall through */
+ case 'n':
+ p++;
+ if (*p != ',')
+ ereport(ERROR,
+ errcode(ERRCODE_PROTOCOL_VIOLATION),
+ errmsg("malformed OAUTHBEARER message"),
+ errdetail("Comma expected, but found character \"%s\".",
+ sanitize_char(*p)));
+ p++;
+ break;
+
+ default:
+ ereport(ERROR,
+ errcode(ERRCODE_PROTOCOL_VIOLATION),
+ errmsg("malformed OAUTHBEARER message"),
+ errdetail("Unexpected channel-binding flag \"%s\".",
+ sanitize_char(cbind_flag)));
+ }
+
+ /*
+ * Forbid optional authzid (authorization identity). We don't support it.
+ */
+ if (*p == 'a')
+ ereport(ERROR,
+ errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("client uses authorization identity, but it is not supported"));
+ if (*p != ',')
+ ereport(ERROR,
+ errcode(ERRCODE_PROTOCOL_VIOLATION),
+ errmsg("malformed OAUTHBEARER message"),
+ errdetail("Unexpected attribute \"%s\" in client-first-message.",
+ sanitize_char(*p)));
+ p++;
+
+ /* All remaining fields are separated by the RFC's kvsep (\x01). */
+ if (*p != KVSEP)
+ ereport(ERROR,
+ errcode(ERRCODE_PROTOCOL_VIOLATION),
+ errmsg("malformed OAUTHBEARER message"),
+ errdetail("Key-value separator expected, but found character \"%s\".",
+ sanitize_char(*p)));
+ p++;
+
+ auth = parse_kvpairs_for_auth(&p);
+ if (!auth)
+ ereport(ERROR,
+ errcode(ERRCODE_PROTOCOL_VIOLATION),
+ errmsg("malformed OAUTHBEARER message"),
+ errdetail("Message does not contain an auth value."));
+
+ /* We should be at the end of our message. */
+ if (*p)
+ ereport(ERROR,
+ errcode(ERRCODE_PROTOCOL_VIOLATION),
+ errmsg("malformed OAUTHBEARER message"),
+ errdetail("Message contains additional data after the final terminator."));
+
+ if (!validate(ctx->port, auth))
+ {
+ generate_error_response(ctx, output, outputlen);
+
+ ctx->state = OAUTH_STATE_ERROR;
+ status = PG_SASL_EXCHANGE_CONTINUE;
+ }
+ else
+ {
+ ctx->state = OAUTH_STATE_FINISHED;
+ status = PG_SASL_EXCHANGE_SUCCESS;
+ }
+
+ /* Don't let extra copies of the bearer token hang around. */
+ explicit_bzero(input_copy, inputlen);
+
+ return status;
+}
+
+/*
+ * Convert an arbitrary byte to printable form. For error messages.
+ *
+ * If it's a printable ASCII character, print it as a single character.
+ * Otherwise, print it in hex.
+ *
+ * The returned pointer points to a static buffer.
+ */
+static char *
+sanitize_char(char c)
+{
+ static char buf[5];
+
+ if (c >= 0x21 && c <= 0x7E)
+ snprintf(buf, sizeof(buf), "'%c'", c);
+ else
+ snprintf(buf, sizeof(buf), "0x%02x", (unsigned char) c);
+ return buf;
+}
+
+/*
+ * Performs syntactic validation of a key and value from the initial client
+ * response. (Semantic validation of interesting values must be performed
+ * later.)
+ */
+static void
+validate_kvpair(const char *key, const char *val)
+{
+ /*-----
+ * From Sec 3.1:
+ * key = 1*(ALPHA)
+ */
+ static const char *key_allowed_set =
+ "abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+
+ size_t span;
+
+ if (!key[0])
+ ereport(ERROR,
+ errcode(ERRCODE_PROTOCOL_VIOLATION),
+ errmsg("malformed OAUTHBEARER message"),
+ errdetail("Message contains an empty key name."));
+
+ span = strspn(key, key_allowed_set);
+ if (key[span] != '\0')
+ ereport(ERROR,
+ errcode(ERRCODE_PROTOCOL_VIOLATION),
+ errmsg("malformed OAUTHBEARER message"),
+ errdetail("Message contains an invalid key name."));
+
+ /*-----
+ * From Sec 3.1:
+ * value = *(VCHAR / SP / HTAB / CR / LF )
+ *
+ * The VCHAR (visible character) class is large; a loop is more
+ * straightforward than strspn().
+ */
+ for (; *val; ++val)
+ {
+ if (0x21 <= *val && *val <= 0x7E)
+ continue; /* VCHAR */
+
+ switch (*val)
+ {
+ case ' ':
+ case '\t':
+ case '\r':
+ case '\n':
+ continue; /* SP, HTAB, CR, LF */
+
+ default:
+ ereport(ERROR,
+ errcode(ERRCODE_PROTOCOL_VIOLATION),
+ errmsg("malformed OAUTHBEARER message"),
+ errdetail("Message contains an invalid value."));
+ }
+ }
+}
+
+/*
+ * Consumes all kvpairs in an OAUTHBEARER exchange message. If the "auth" key is
+ * found, its value is returned.
+ */
+static char *
+parse_kvpairs_for_auth(char **input)
+{
+ char *pos = *input;
+ char *auth = NULL;
+
+ /*----
+ * The relevant ABNF, from Sec. 3.1:
+ *
+ * kvsep = %x01
+ * key = 1*(ALPHA)
+ * value = *(VCHAR / SP / HTAB / CR / LF )
+ * kvpair = key "=" value kvsep
+ * ;;gs2-header = See RFC 5801
+ * client-resp = (gs2-header kvsep *kvpair kvsep) / kvsep
+ *
+ * By the time we reach this code, the gs2-header and initial kvsep have
+ * already been validated. We start at the beginning of the first kvpair.
+ */
+
+ while (*pos)
+ {
+ char *end;
+ char *sep;
+ char *key;
+ char *value;
+
+ /*
+ * Find the end of this kvpair. Note that input is null-terminated by
+ * the SASL code, so the strchr() is bounded.
+ */
+ end = strchr(pos, KVSEP);
+ if (!end)
+ ereport(ERROR,
+ errcode(ERRCODE_PROTOCOL_VIOLATION),
+ errmsg("malformed OAUTHBEARER message"),
+ errdetail("Message contains an unterminated key/value pair."));
+ *end = '\0';
+
+ if (pos == end)
+ {
+ /* Empty kvpair, signifying the end of the list. */
+ *input = pos + 1;
+ return auth;
+ }
+
+ /*
+ * Find the end of the key name.
+ */
+ sep = strchr(pos, '=');
+ if (!sep)
+ ereport(ERROR,
+ errcode(ERRCODE_PROTOCOL_VIOLATION),
+ errmsg("malformed OAUTHBEARER message"),
+ errdetail("Message contains a key without a value."));
+ *sep = '\0';
+
+ /* Both key and value are now safely terminated. */
+ key = pos;
+ value = sep + 1;
+ validate_kvpair(key, value);
+
+ if (strcmp(key, AUTH_KEY) == 0)
+ {
+ if (auth)
+ ereport(ERROR,
+ errcode(ERRCODE_PROTOCOL_VIOLATION),
+ errmsg("malformed OAUTHBEARER message"),
+ errdetail("Message contains multiple auth values."));
+
+ auth = value;
+ }
+ else
+ {
+ /*
+ * The RFC also defines the host and port keys, but they are not
+ * required for OAUTHBEARER and we do not use them. Also, per Sec.
+ * 3.1, any key/value pairs we don't recognize must be ignored.
+ */
+ }
+
+ /* Move to the next pair. */
+ pos = end + 1;
+ }
+
+ ereport(ERROR,
+ errcode(ERRCODE_PROTOCOL_VIOLATION),
+ errmsg("malformed OAUTHBEARER message"),
+ errdetail("Message did not contain a final terminator."));
+
+ pg_unreachable();
+ return NULL;
+}
+
+/*
+ * Builds the JSON response for failed authentication (RFC 7628, Sec. 3.2.2).
+ * This contains the required scopes for entry and a pointer to the OAuth/OpenID
+ * discovery document, which the client may use to conduct its OAuth flow.
+ */
+static void
+generate_error_response(struct oauth_ctx *ctx, char **output, int *outputlen)
+{
+ StringInfoData buf;
+ StringInfoData issuer;
+
+ /*
+ * The admin needs to set an issuer and scope for OAuth to work. There's
+ * not really a way to hide this from the user, either, because we can't
+ * choose a "default" issuer, so be honest in the failure message. (In
+ * practice such configurations are rejected during HBA parsing.)
+ */
+ if (!ctx->issuer || !ctx->scope)
+ ereport(FATAL,
+ errcode(ERRCODE_INTERNAL_ERROR),
+ errmsg("OAuth is not properly configured for this user"),
+ errdetail_log("The issuer and scope parameters must be set in pg_hba.conf."));
+
+ /*
+ * Build a default .well-known URI based on our issuer, unless the HBA has
+ * already provided one.
+ */
+ initStringInfo(&issuer);
+ appendStringInfoString(&issuer, ctx->issuer);
+ if (strstr(ctx->issuer, "/.well-known/") == NULL)
+ appendStringInfoString(&issuer, "/.well-known/openid-configuration");
+
+ initStringInfo(&buf);
+
+ /*
+ * Escaping the string here is belt-and-suspenders defensive programming
+ * since escapable characters aren't valid in either the issuer URI or the
+ * scope list, but the HBA doesn't enforce that yet.
+ */
+ appendStringInfoString(&buf, "{ \"status\": \"invalid_token\", ");
+
+ appendStringInfoString(&buf, "\"openid-configuration\": ");
+ escape_json(&buf, issuer.data);
+ pfree(issuer.data);
+
+ appendStringInfoString(&buf, ", \"scope\": ");
+ escape_json(&buf, ctx->scope);
+
+ appendStringInfoString(&buf, " }");
+
+ *output = buf.data;
+ *outputlen = buf.len;
+}
+
+/*-----
+ * Validates the provided Authorization header and returns the token from
+ * within it. NULL is returned on validation failure.
+ *
+ * Only Bearer tokens are accepted. The ABNF is defined in RFC 6750, Sec.
+ * 2.1:
+ *
+ * b64token = 1*( ALPHA / DIGIT /
+ * "-" / "." / "_" / "~" / "+" / "/" ) *"="
+ * credentials = "Bearer" 1*SP b64token
+ *
+ * The "credentials" construction is what we receive in our auth value.
+ *
+ * Since that spec is subordinate to HTTP (i.e. the HTTP Authorization
+ * header format; RFC 9110 Sec. 11), the "Bearer" scheme string must be
+ * compared case-insensitively. (This is not mentioned in RFC 6750, but the
+ * OAUTHBEARER spec points it out: RFC 7628 Sec. 4.)
+ *
+ * Invalid formats are technically a protocol violation, but we shouldn't
+ * reflect any information about the sensitive Bearer token back to the
+ * client; log at COMMERROR instead.
+ */
+static const char *
+validate_token_format(const char *header)
+{
+ size_t span;
+ const char *token;
+ static const char *const b64token_allowed_set =
+ "abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "0123456789-._~+/";
+
+ /* Missing auth headers should be handled by the caller. */
+ Assert(header);
+
+ if (header[0] == '\0')
+ {
+ /*
+ * A completely empty auth header represents a query for
+ * authentication parameters. The client expects it to fail; there's
+ * no need to make any extra noise in the logs.
+ *
+ * TODO: should we find a way to return STATUS_EOF at the top level,
+ * to suppress the authentication error entirely?
+ */
+ return NULL;
+ }
+
+ if (pg_strncasecmp(header, BEARER_SCHEME, strlen(BEARER_SCHEME)))
+ {
+ ereport(COMMERROR,
+ errcode(ERRCODE_PROTOCOL_VIOLATION),
+ errmsg("malformed OAuth bearer token"),
+ errdetail_log("Client response indicated a non-Bearer authentication scheme."));
+ return NULL;
+ }
+
+ /* Pull the bearer token out of the auth value. */
+ token = header + strlen(BEARER_SCHEME);
+
+ /* Swallow any additional spaces. */
+ while (*token == ' ')
+ token++;
+
+ /* Tokens must not be empty. */
+ if (!*token)
+ {
+ ereport(COMMERROR,
+ errcode(ERRCODE_PROTOCOL_VIOLATION),
+ errmsg("malformed OAuth bearer token"),
+ errdetail_log("Bearer token is empty."));
+ return NULL;
+ }
+
+ /*
+ * Make sure the token contains only allowed characters. Tokens may end
+ * with any number of '=' characters.
+ */
+ span = strspn(token, b64token_allowed_set);
+ while (token[span] == '=')
+ span++;
+
+ if (token[span] != '\0')
+ {
+ /*
+ * This error message could be more helpful by printing the
+ * problematic character(s), but that'd be a bit like printing a piece
+ * of someone's password into the logs.
+ */
+ ereport(COMMERROR,
+ errcode(ERRCODE_PROTOCOL_VIOLATION),
+ errmsg("malformed OAuth bearer token"),
+ errdetail_log("Bearer token is not in the correct format."));
+ return NULL;
+ }
+
+ return token;
+}
+
+/*
+ * Checks that the "auth" kvpair in the client response contains a syntactically
+ * valid Bearer token, then passes it along to the loaded validator module for
+ * authorization. Returns true if validation succeeds.
+ */
+static bool
+validate(Port *port, const char *auth)
+{
+ int map_status;
+ ValidatorModuleResult *ret;
+ const char *token;
+ bool status;
+
+ /* Ensure that we have a correct token to validate */
+ if (!(token = validate_token_format(auth)))
+ return false;
+
+ /*
+ * Ensure that we have a validation library loaded; this should always be
+ * the case, and an error here is indicative of a bug.
+ */
+ if (!ValidatorCallbacks || !ValidatorCallbacks->validate_cb)
+ ereport(FATAL,
+ errcode(ERRCODE_INTERNAL_ERROR),
+ errmsg("validation of OAuth token requested without a validator loaded"));
+
+ /* Call the validation function from the validator module */
+ ret = palloc0(sizeof(ValidatorModuleResult));
+ if (!ValidatorCallbacks->validate_cb(validator_module_state, token,
+ port->user_name, ret))
+ {
+ ereport(WARNING,
+ errcode(ERRCODE_INTERNAL_ERROR),
+ errmsg("internal error in OAuth validator module"));
+ return false;
+ }
+
+ /*
+ * Log any authentication results even if the token isn't authorized; it
+ * might be useful for auditing or troubleshooting.
+ */
+ if (ret->authn_id)
+ set_authn_id(port, ret->authn_id);
+
+ if (!ret->authorized)
+ {
+ ereport(LOG,
+ errmsg("OAuth bearer authentication failed for user \"%s\"",
+ port->user_name),
+ errdetail_log("Validator failed to authorize the provided token."));
+
+ status = false;
+ goto cleanup;
+ }
+
+ if (port->hba->oauth_skip_usermap)
+ {
+ /*
+ * If the validator is our authorization authority, we're done.
+ * Authentication may or may not have been performed depending on the
+ * validator implementation; all that matters is that the validator
+ * says the user can log in with the target role.
+ */
+ status = true;
+ goto cleanup;
+ }
+
+ /* Make sure the validator authenticated the user. */
+ if (ret->authn_id == NULL || ret->authn_id[0] == '\0')
+ {
+ ereport(LOG,
+ errmsg("OAuth bearer authentication failed for user \"%s\"",
+ port->user_name),
+ errdetail_log("Validator provided no identity."));
+
+ status = false;
+ goto cleanup;
+ }
+
+ /* Finally, check the user map. */
+ map_status = check_usermap(port->hba->usermap, port->user_name,
+ MyClientConnectionInfo.authn_id, false);
+ status = (map_status == STATUS_OK);
+
+cleanup:
+
+ /*
+ * Clear and free the validation result from the validator module once
+ * we're done with it.
+ */
+ if (ret->authn_id != NULL)
+ pfree(ret->authn_id);
+ pfree(ret);
+
+ return status;
+}
+
+/*
+ * load_validator_library
+ *
+ * Load the configured validator library in order to perform token validation.
+ * There is no built-in fallback since validation is implementation-specific. If
+ * no validator library is configured, or if it fails to load, then error out
+ * since token validation won't be possible.
+ */
+static void
+load_validator_library(const char *libname)
+{
+ OAuthValidatorModuleInit validator_init;
+ MemoryContextCallback *mcb;
+
+ /*
+ * The presence, and validity, of libname has already been established by
+ * check_oauth_validator so we don't need to perform more than Assert
+ * level checking here.
+ */
+ Assert(libname && *libname);
+
+ validator_init = (OAuthValidatorModuleInit)
+ load_external_function(libname, "_PG_oauth_validator_module_init",
+ false, NULL);
+
+ /*
+ * The validator init function is required since it will set the callbacks
+ * for the validator library.
+ */
+ if (validator_init == NULL)
+ ereport(ERROR,
+ errmsg("%s module \"%s\" must define the symbol %s",
+ "OAuth validator", libname, "_PG_oauth_validator_module_init"));
+
+ ValidatorCallbacks = (*validator_init) ();
+ Assert(ValidatorCallbacks);
+
+ /*
+ * Check the magic number, to protect against break-glass scenarios where
+ * the ABI must change within a major version. load_external_function()
+ * already checks for compatibility across major versions.
+ */
+ if (ValidatorCallbacks->magic != PG_OAUTH_VALIDATOR_MAGIC)
+ ereport(ERROR,
+ errmsg("%s module \"%s\": magic number mismatch",
+ "OAuth validator", libname),
+ errdetail("Server has magic number 0x%08X, module has 0x%08X.",
+ PG_OAUTH_VALIDATOR_MAGIC, ValidatorCallbacks->magic));
+
+ /*
+ * Make sure all required callbacks are present in the ValidatorCallbacks
+ * structure. Right now only the validation callback is required.
+ */
+ if (ValidatorCallbacks->validate_cb == NULL)
+ ereport(ERROR,
+ errmsg("%s module \"%s\" must provide a %s callback",
+ "OAuth validator", libname, "validate_cb"));
+
+ /* Allocate memory for validator library private state data */
+ validator_module_state = (ValidatorModuleState *) palloc0(sizeof(ValidatorModuleState));
+ validator_module_state->sversion = PG_VERSION_NUM;
+
+ if (ValidatorCallbacks->startup_cb != NULL)
+ ValidatorCallbacks->startup_cb(validator_module_state);
+
+ /* Shut down the library before cleaning up its state. */
+ mcb = palloc0(sizeof(*mcb));
+ mcb->func = shutdown_validator_library;
+
+ MemoryContextRegisterResetCallback(CurrentMemoryContext, mcb);
+}
+
+/*
+ * Call the validator module's shutdown callback, if one is provided. This is
+ * invoked during memory context reset.
+ */
+static void
+shutdown_validator_library(void *arg)
+{
+ if (ValidatorCallbacks->shutdown_cb != NULL)
+ ValidatorCallbacks->shutdown_cb(validator_module_state);
+}
+
+/*
+ * Ensure an OAuth validator named in the HBA is permitted by the configuration.
+ *
+ * If the validator is currently unset and exactly one library is declared in
+ * oauth_validator_libraries, then that library will be used as the validator.
+ * Otherwise the name must be present in the list of oauth_validator_libraries.
+ */
+bool
+check_oauth_validator(HbaLine *hbaline, int elevel, char **err_msg)
+{
+ int line_num = hbaline->linenumber;
+ const char *file_name = hbaline->sourcefile;
+ char *rawstring;
+ List *elemlist = NIL;
+
+ *err_msg = NULL;
+
+ if (oauth_validator_libraries_string[0] == '\0')
+ {
+ ereport(elevel,
+ errcode(ERRCODE_CONFIG_FILE_ERROR),
+ errmsg("oauth_validator_libraries must be set for authentication method %s",
+ "oauth"),
+ errcontext("line %d of configuration file \"%s\"",
+ line_num, file_name));
+ *err_msg = psprintf("oauth_validator_libraries must be set for authentication method %s",
+ "oauth");
+ return false;
+ }
+
+ /* SplitDirectoriesString needs a modifiable copy */
+ rawstring = pstrdup(oauth_validator_libraries_string);
+
+ if (!SplitDirectoriesString(rawstring, ',', &elemlist))
+ {
+ /* syntax error in list */
+ ereport(elevel,
+ errcode(ERRCODE_CONFIG_FILE_ERROR),
+ errmsg("invalid list syntax in parameter \"%s\"",
+ "oauth_validator_libraries"));
+ *err_msg = psprintf("invalid list syntax in parameter \"%s\"",
+ "oauth_validator_libraries");
+ goto done;
+ }
+
+ if (!hbaline->oauth_validator)
+ {
+ if (elemlist->length == 1)
+ {
+ hbaline->oauth_validator = pstrdup(linitial(elemlist));
+ goto done;
+ }
+
+ ereport(elevel,
+ errcode(ERRCODE_CONFIG_FILE_ERROR),
+ errmsg("authentication method \"oauth\" requires argument \"validator\" to be set when oauth_validator_libraries contains multiple options"),
+ errcontext("line %d of configuration file \"%s\"",
+ line_num, file_name));
+ *err_msg = "authentication method \"oauth\" requires argument \"validator\" to be set when oauth_validator_libraries contains multiple options";
+ goto done;
+ }
+
+ foreach_ptr(char, allowed, elemlist)
+ {
+ if (strcmp(allowed, hbaline->oauth_validator) == 0)
+ goto done;
+ }
+
+ ereport(elevel,
+ errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("validator \"%s\" is not permitted by %s",
+ hbaline->oauth_validator, "oauth_validator_libraries"),
+ errcontext("line %d of configuration file \"%s\"",
+ line_num, file_name));
+ *err_msg = psprintf("validator \"%s\" is not permitted by %s",
+ hbaline->oauth_validator, "oauth_validator_libraries");
+
+done:
+ list_free_deep(elemlist);
+ pfree(rawstring);
+
+ return (*err_msg == NULL);
+}
#include "libpq/auth.h"
#include "libpq/crypt.h"
#include "libpq/libpq.h"
+#include "libpq/oauth.h"
#include "libpq/pqformat.h"
#include "libpq/sasl.h"
#include "libpq/scram.h"
*/
static void auth_failed(Port *port, int status, const char *logdetail);
static char *recv_password_packet(Port *port);
-static void set_authn_id(Port *port, const char *id);
/*----------------------------------------------------------------
case uaRADIUS:
errstr = gettext_noop("RADIUS authentication failed for user \"%s\"");
break;
+ case uaOAuth:
+ errstr = gettext_noop("OAuth bearer authentication failed for user \"%s\"");
+ break;
default:
errstr = gettext_noop("authentication failed for user \"%s\": invalid authentication method");
break;
* lifetime of MyClientConnectionInfo, so it is safe to pass a string that is
* managed by an external library.
*/
-static void
+void
set_authn_id(Port *port, const char *id)
{
Assert(id);
case uaTrust:
status = STATUS_OK;
break;
+ case uaOAuth:
+ status = CheckSASLAuth(&pg_be_oauth_mech, port, NULL, NULL);
+ break;
}
if ((status == STATUS_OK && port->hba->clientcert == clientCertFull)
#include "libpq/hba.h"
#include "libpq/ifaddr.h"
#include "libpq/libpq-be.h"
+#include "libpq/oauth.h"
#include "postmaster/postmaster.h"
#include "regex/regex.h"
#include "replication/walsender.h"
"ldap",
"cert",
"radius",
- "peer"
+ "peer",
+ "oauth",
};
/*
#endif
else if (strcmp(token->string, "radius") == 0)
parsedline->auth_method = uaRADIUS;
+ else if (strcmp(token->string, "oauth") == 0)
+ parsedline->auth_method = uaOAuth;
else
{
ereport(elevel,
parsedline->clientcert = clientCertFull;
}
+ /*
+ * Enforce proper configuration of OAuth authentication.
+ */
+ if (parsedline->auth_method == uaOAuth)
+ {
+ MANDATORY_AUTH_ARG(parsedline->oauth_scope, "scope", "oauth");
+ MANDATORY_AUTH_ARG(parsedline->oauth_issuer, "issuer", "oauth");
+
+ /* Ensure a validator library is set and permitted by the config. */
+ if (!check_oauth_validator(parsedline, elevel, err_msg))
+ return NULL;
+
+ /*
+ * Supplying a usermap combined with the option to skip usermapping is
+ * nonsensical and indicates a configuration error.
+ */
+ if (parsedline->oauth_skip_usermap && parsedline->usermap != NULL)
+ {
+ ereport(elevel,
+ errcode(ERRCODE_CONFIG_FILE_ERROR),
+ /* translator: strings are replaced with hba options */
+ errmsg("%s cannot be used in combination with %s",
+ "map", "delegate_ident_mapping"),
+ errcontext("line %d of configuration file \"%s\"",
+ line_num, file_name));
+ *err_msg = "map cannot be used in combination with delegate_ident_mapping";
+ return NULL;
+ }
+ }
+
return parsedline;
}
hbaline->auth_method != uaPeer &&
hbaline->auth_method != uaGSS &&
hbaline->auth_method != uaSSPI &&
- hbaline->auth_method != uaCert)
- INVALID_AUTH_OPTION("map", gettext_noop("ident, peer, gssapi, sspi, and cert"));
+ hbaline->auth_method != uaCert &&
+ hbaline->auth_method != uaOAuth)
+ INVALID_AUTH_OPTION("map", gettext_noop("ident, peer, gssapi, sspi, cert, and oauth"));
hbaline->usermap = pstrdup(val);
}
else if (strcmp(name, "clientcert") == 0)
hbaline->radiusidentifiers = parsed_identifiers;
hbaline->radiusidentifiers_s = pstrdup(val);
}
+ else if (strcmp(name, "issuer") == 0)
+ {
+ REQUIRE_AUTH_OPTION(uaOAuth, "issuer", "oauth");
+ hbaline->oauth_issuer = pstrdup(val);
+ }
+ else if (strcmp(name, "scope") == 0)
+ {
+ REQUIRE_AUTH_OPTION(uaOAuth, "scope", "oauth");
+ hbaline->oauth_scope = pstrdup(val);
+ }
+ else if (strcmp(name, "validator") == 0)
+ {
+ REQUIRE_AUTH_OPTION(uaOAuth, "validator", "oauth");
+ hbaline->oauth_validator = pstrdup(val);
+ }
+ else if (strcmp(name, "delegate_ident_mapping") == 0)
+ {
+ REQUIRE_AUTH_OPTION(uaOAuth, "delegate_ident_mapping", "oauth");
+ if (strcmp(val, "1") == 0)
+ hbaline->oauth_skip_usermap = true;
+ else
+ hbaline->oauth_skip_usermap = false;
+ }
else
{
ereport(elevel,
# Copyright (c) 2022-2025, PostgreSQL Global Development Group
backend_sources += files(
+ 'auth-oauth.c',
'auth-sasl.c',
'auth-scram.c',
'auth.c',
# directly connected to.
#
# METHOD can be "trust", "reject", "md5", "password", "scram-sha-256",
-# "gss", "sspi", "ident", "peer", "pam", "ldap", "radius" or "cert".
-# Note that "password" sends passwords in clear text; "md5" or
+# "gss", "sspi", "ident", "peer", "pam", "oauth", "ldap", "radius" or
+# "cert". Note that "password" sends passwords in clear text; "md5" or
# "scram-sha-256" are preferred since they send encrypted passwords.
#
# OPTIONS are a set of options for the authentication in the format
CStringGetTextDatum(psprintf("radiusports=%s", hba->radiusports_s));
}
+ if (hba->auth_method == uaOAuth)
+ {
+ if (hba->oauth_issuer)
+ options[noptions++] =
+ CStringGetTextDatum(psprintf("issuer=%s", hba->oauth_issuer));
+
+ if (hba->oauth_scope)
+ options[noptions++] =
+ CStringGetTextDatum(psprintf("scope=%s", hba->oauth_scope));
+
+ if (hba->oauth_validator)
+ options[noptions++] =
+ CStringGetTextDatum(psprintf("validator=%s", hba->oauth_validator));
+
+ if (hba->oauth_skip_usermap)
+ options[noptions++] =
+ CStringGetTextDatum(psprintf("delegate_ident_mapping=true"));
+ }
+
/* If you add more options, consider increasing MAX_HBA_OPTIONS. */
Assert(noptions <= MAX_HBA_OPTIONS);
#include "jit/jit.h"
#include "libpq/auth.h"
#include "libpq/libpq.h"
+#include "libpq/oauth.h"
#include "libpq/scram.h"
#include "nodes/queryjumble.h"
#include "optimizer/cost.h"
check_restrict_nonsystem_relation_kind, assign_restrict_nonsystem_relation_kind, NULL
},
+ {
+ {"oauth_validator_libraries", PGC_SIGHUP, CONN_AUTH_AUTH,
+ gettext_noop("Lists libraries that may be called to validate OAuth v2 bearer tokens."),
+ NULL,
+ GUC_LIST_INPUT | GUC_LIST_QUOTE | GUC_SUPERUSER_ONLY
+ },
+ &oauth_validator_libraries_string,
+ "",
+ NULL, NULL, NULL
+ },
+
/* End-of-list marker */
{
{NULL, 0, 0, NULL, NULL}, NULL, NULL, NULL, NULL, NULL
#ssl_passphrase_command = ''
#ssl_passphrase_command_supports_reload = off
+# OAuth
+#oauth_validator_libraries = '' # comma-separated list of trusted validator modules
+
#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
--- /dev/null
+/*-------------------------------------------------------------------------
+ *
+ * oauth-common.h
+ * Declarations for helper functions used for OAuth/OIDC authentication
+ *
+ * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/common/oauth-common.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef OAUTH_COMMON_H
+#define OAUTH_COMMON_H
+
+/* Name of SASL mechanism per IANA */
+#define OAUTHBEARER_NAME "OAUTHBEARER"
+
+#endif /* OAUTH_COMMON_H */
extern void ClientAuthentication(Port *port);
extern void sendAuthRequest(Port *port, AuthRequest areq, const char *extradata,
int extralen);
+extern void set_authn_id(Port *port, const char *id);
/* Hook for plugins to get control in ClientAuthentication() */
typedef void (*ClientAuthentication_hook_type) (Port *, int);
uaCert,
uaRADIUS,
uaPeer,
-#define USER_AUTH_LAST uaPeer /* Must be last value of this enum */
+ uaOAuth,
+#define USER_AUTH_LAST uaOAuth /* Must be last value of this enum */
} UserAuth;
/*
char *radiusidentifiers_s;
List *radiusports;
char *radiusports_s;
+ char *oauth_issuer;
+ char *oauth_scope;
+ char *oauth_validator;
+ bool oauth_skip_usermap;
} HbaLine;
typedef struct IdentLine
--- /dev/null
+/*-------------------------------------------------------------------------
+ *
+ * oauth.h
+ * Interface to libpq/auth-oauth.c
+ *
+ * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/libpq/oauth.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef PG_OAUTH_H
+#define PG_OAUTH_H
+
+#include "libpq/libpq-be.h"
+#include "libpq/sasl.h"
+
+extern PGDLLIMPORT char *oauth_validator_libraries_string;
+
+typedef struct ValidatorModuleState
+{
+ /* Holds the server's PG_VERSION_NUM. Reserved for future extensibility. */
+ int sversion;
+
+ /*
+ * Private data pointer for use by a validator module. This can be used to
+ * store state for the module that will be passed to each of its
+ * callbacks.
+ */
+ void *private_data;
+} ValidatorModuleState;
+
+typedef struct ValidatorModuleResult
+{
+ /*
+ * Should be set to true if the token carries sufficient permissions for
+ * the bearer to connect.
+ */
+ bool authorized;
+
+ /*
+ * If the token authenticates the user, this should be set to a palloc'd
+ * string containing the SYSTEM_USER to use for HBA mapping. Consider
+ * setting this even if result->authorized is false so that DBAs may use
+ * the logs to match end users to token failures.
+ *
+ * This is required if the module is not configured for ident mapping
+ * delegation. See the validator module documentation for details.
+ */
+ char *authn_id;
+} ValidatorModuleResult;
+
+/*
+ * Validator module callbacks
+ *
+ * These callback functions should be defined by validator modules and returned
+ * via _PG_oauth_validator_module_init(). ValidatorValidateCB is the only
+ * required callback. For more information about the purpose of each callback,
+ * refer to the OAuth validator modules documentation.
+ */
+typedef void (*ValidatorStartupCB) (ValidatorModuleState *state);
+typedef void (*ValidatorShutdownCB) (ValidatorModuleState *state);
+typedef bool (*ValidatorValidateCB) (const ValidatorModuleState *state,
+ const char *token, const char *role,
+ ValidatorModuleResult *result);
+
+/*
+ * Identifies the compiled ABI version of the validator module. Since the server
+ * already enforces the PG_MODULE_MAGIC number for modules across major
+ * versions, this is reserved for emergency use within a stable release line.
+ * May it never need to change.
+ */
+#define PG_OAUTH_VALIDATOR_MAGIC 0x20250220
+
+typedef struct OAuthValidatorCallbacks
+{
+ uint32 magic; /* must be set to PG_OAUTH_VALIDATOR_MAGIC */
+
+ ValidatorStartupCB startup_cb;
+ ValidatorShutdownCB shutdown_cb;
+ ValidatorValidateCB validate_cb;
+} OAuthValidatorCallbacks;
+
+/*
+ * Type of the shared library symbol _PG_oauth_validator_module_init which is
+ * required for all validator modules. This function will be invoked during
+ * module loading.
+ */
+typedef const OAuthValidatorCallbacks *(*OAuthValidatorModuleInit) (void);
+extern PGDLLEXPORT const OAuthValidatorCallbacks *_PG_oauth_validator_module_init(void);
+
+/* Implementation */
+extern const pg_be_sasl_mech pg_be_oauth_mech;
+
+/*
+ * Ensure a validator named in the HBA is permitted by the configuration.
+ */
+extern bool check_oauth_validator(HbaLine *hba, int elevel, char **err_msg);
+
+#endif /* PG_OAUTH_H */
/* Define to 1 if you have the `crypto' library (-lcrypto). */
#undef HAVE_LIBCRYPTO
+/* Define to 1 if you have the `curl' library (-lcurl). */
+#undef HAVE_LIBCURL
+
/* Define to 1 if you have the `ldap' library (-lldap). */
#undef HAVE_LIBLDAP
/* Define to 1 if you have the <termios.h> header file. */
#undef HAVE_TERMIOS_H
+/* Define to 1 if curl_global_init() is guaranteed to be thread-safe. */
+#undef HAVE_THREADSAFE_CURL_GLOBAL_INIT
+
/* Define to 1 if your compiler understands `typeof' or something similar. */
#undef HAVE_TYPEOF
/* Define to 1 to build with LDAP support. (--with-ldap) */
#undef USE_LDAP
+/* Define to 1 to build with libcurl support. (--with-libcurl) */
+#undef USE_LIBCURL
+
/* Define to 1 to build with XML support. (--with-libxml) */
#undef USE_LIBXML
OBJS = \
$(WIN32RES) \
+ fe-auth-oauth.o \
fe-auth-scram.o \
fe-cancel.o \
fe-connect.o \
fe-secure-gssapi.o
endif
+ifeq ($(with_libcurl),yes)
+OBJS += fe-auth-oauth-curl.o
+endif
+
ifeq ($(PORTNAME), cygwin)
override shlib = cyg$(NAME)$(DLSUFFIX)
endif
# that are built correctly for use in a shlib.
SHLIB_LINK_INTERNAL = -lpgcommon_shlib -lpgport_shlib
ifneq ($(PORTNAME), win32)
-SHLIB_LINK += $(filter -lcrypt -ldes -lcom_err -lcrypto -lk5crypto -lkrb5 -lgssapi_krb5 -lgss -lgssapi -lssl -lsocket -lnsl -lresolv -lintl -lm, $(LIBS)) $(LDAP_LIBS_FE) $(PTHREAD_LIBS)
+SHLIB_LINK += $(filter -lcrypt -ldes -lcom_err -lcrypto -lk5crypto -lkrb5 -lgssapi_krb5 -lgss -lgssapi -lssl -lcurl -lsocket -lnsl -lresolv -lintl -lm, $(LIBS)) $(LDAP_LIBS_FE) $(PTHREAD_LIBS)
else
SHLIB_LINK += $(filter -lcrypt -ldes -lcom_err -lcrypto -lk5crypto -lkrb5 -lgssapi32 -lssl -lsocket -lnsl -lresolv -lintl -lm $(PTHREAD_LIBS), $(LIBS)) $(LDAP_LIBS_FE)
endif
# which seems to insert references to that even in pure C code. Excluding
# __tsan_func_exit is necessary when using ThreadSanitizer data race detector
# which use this function for instrumentation of function exit.
+# libcurl registers an exit handler in the memory debugging code when running
+# with LeakSanitizer.
# Skip the test when profiling, as gcc may insert exit() calls for that.
# Also skip the test on platforms where libpq infrastructure may be provided
# by statically-linked libraries, as we can't expect them to honor this
libpq-refs-stamp: $(shlib)
ifneq ($(enable_coverage), yes)
ifeq (,$(filter solaris,$(PORTNAME)))
- @if nm -A -u $< 2>/dev/null | grep -v -e __cxa_atexit -e __tsan_func_exit | grep exit; then \
+ @if nm -A -u $< 2>/dev/null | grep -v -e __cxa_atexit -e __tsan_func_exit -e _atexit | grep exit; then \
echo 'libpq must not be calling any function which invokes exit'; exit 1; \
fi
endif
PQsetChunkedRowsMode 204
PQgetCurrentTimeUSec 205
PQservice 206
+PQsetAuthDataHook 207
+PQgetAuthDataHook 208
+PQdefaultAuthDataHook 209
--- /dev/null
+/*-------------------------------------------------------------------------
+ *
+ * fe-auth-oauth-curl.c
+ * The libcurl implementation of OAuth/OIDC authentication, using the
+ * OAuth Device Authorization Grant (RFC 8628).
+ *
+ * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ * src/interfaces/libpq/fe-auth-oauth-curl.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres_fe.h"
+
+#include <curl/curl.h>
+#include <math.h>
+#ifdef HAVE_SYS_EPOLL_H
+#include <sys/epoll.h>
+#include <sys/timerfd.h>
+#endif
+#ifdef HAVE_SYS_EVENT_H
+#include <sys/event.h>
+#endif
+#include <unistd.h>
+
+#include "common/jsonapi.h"
+#include "fe-auth.h"
+#include "fe-auth-oauth.h"
+#include "libpq-int.h"
+#include "mb/pg_wchar.h"
+
+/*
+ * It's generally prudent to set a maximum response size to buffer in memory,
+ * but it's less clear what size to choose. The biggest of our expected
+ * responses is the server metadata JSON, which will only continue to grow in
+ * size; the number of IANA-registered parameters in that document is up to 78
+ * as of February 2025.
+ *
+ * Even if every single parameter were to take up 2k on average (a previously
+ * common limit on the size of a URL), 256k gives us 128 parameter values before
+ * we give up. (That's almost certainly complete overkill in practice; 2-4k
+ * appears to be common among popular providers at the moment.)
+ */
+#define MAX_OAUTH_RESPONSE_SIZE (256 * 1024)
+
+/*
+ * Parsed JSON Representations
+ *
+ * As a general rule, we parse and cache only the fields we're currently using.
+ * When adding new fields, ensure the corresponding free_*() function is updated
+ * too.
+ */
+
+/*
+ * The OpenID Provider configuration (alternatively named "authorization server
+ * metadata") jointly described by OpenID Connect Discovery 1.0 and RFC 8414:
+ *
+ * https://openid.net/specs/openid-connect-discovery-1_0.html
+ * https://www.rfc-editor.org/rfc/rfc8414#section-3.2
+ */
+struct provider
+{
+ char *issuer;
+ char *token_endpoint;
+ char *device_authorization_endpoint;
+ struct curl_slist *grant_types_supported;
+};
+
+static void
+free_provider(struct provider *provider)
+{
+ free(provider->issuer);
+ free(provider->token_endpoint);
+ free(provider->device_authorization_endpoint);
+ curl_slist_free_all(provider->grant_types_supported);
+}
+
+/*
+ * The Device Authorization response, described by RFC 8628:
+ *
+ * https://www.rfc-editor.org/rfc/rfc8628#section-3.2
+ */
+struct device_authz
+{
+ char *device_code;
+ char *user_code;
+ char *verification_uri;
+ char *verification_uri_complete;
+ char *expires_in_str;
+ char *interval_str;
+
+ /* Fields below are parsed from the corresponding string above. */
+ int expires_in;
+ int interval;
+};
+
+static void
+free_device_authz(struct device_authz *authz)
+{
+ free(authz->device_code);
+ free(authz->user_code);
+ free(authz->verification_uri);
+ free(authz->verification_uri_complete);
+ free(authz->expires_in_str);
+ free(authz->interval_str);
+}
+
+/*
+ * The Token Endpoint error response, as described by RFC 6749:
+ *
+ * https://www.rfc-editor.org/rfc/rfc6749#section-5.2
+ *
+ * Note that this response type can also be returned from the Device
+ * Authorization Endpoint.
+ */
+struct token_error
+{
+ char *error;
+ char *error_description;
+};
+
+static void
+free_token_error(struct token_error *err)
+{
+ free(err->error);
+ free(err->error_description);
+}
+
+/*
+ * The Access Token response, as described by RFC 6749:
+ *
+ * https://www.rfc-editor.org/rfc/rfc6749#section-4.1.4
+ *
+ * During the Device Authorization flow, several temporary errors are expected
+ * as part of normal operation. To make it easy to handle these in the happy
+ * path, this contains an embedded token_error that is filled in if needed.
+ */
+struct token
+{
+ /* for successful responses */
+ char *access_token;
+ char *token_type;
+
+ /* for error responses */
+ struct token_error err;
+};
+
+static void
+free_token(struct token *tok)
+{
+ free(tok->access_token);
+ free(tok->token_type);
+ free_token_error(&tok->err);
+}
+
+/*
+ * Asynchronous State
+ */
+
+/* States for the overall async machine. */
+enum OAuthStep
+{
+ OAUTH_STEP_INIT = 0,
+ OAUTH_STEP_DISCOVERY,
+ OAUTH_STEP_DEVICE_AUTHORIZATION,
+ OAUTH_STEP_TOKEN_REQUEST,
+ OAUTH_STEP_WAIT_INTERVAL,
+};
+
+/*
+ * The async_ctx holds onto state that needs to persist across multiple calls
+ * to pg_fe_run_oauth_flow(). Almost everything interacts with this in some
+ * way.
+ */
+struct async_ctx
+{
+ enum OAuthStep step; /* where are we in the flow? */
+
+ int timerfd; /* descriptor for signaling async timeouts */
+ pgsocket mux; /* the multiplexer socket containing all
+ * descriptors tracked by libcurl, plus the
+ * timerfd */
+ CURLM *curlm; /* top-level multi handle for libcurl
+ * operations */
+ CURL *curl; /* the (single) easy handle for serial
+ * requests */
+
+ struct curl_slist *headers; /* common headers for all requests */
+ PQExpBufferData work_data; /* scratch buffer for general use (remember to
+ * clear out prior contents first!) */
+
+ /*------
+ * Since a single logical operation may stretch across multiple calls to
+ * our entry point, errors have three parts:
+ *
+ * - errctx: an optional static string, describing the global operation
+ * currently in progress. It'll be translated for you.
+ *
+ * - errbuf: contains the actual error message. Generally speaking, use
+ * actx_error[_str] to manipulate this. This must be filled
+ * with something useful on an error.
+ *
+ * - curl_err: an optional static error buffer used by libcurl to put
+ * detailed information about failures. Unfortunately
+ * untranslatable.
+ *
+ * These pieces will be combined into a single error message looking
+ * something like the following, with errctx and/or curl_err omitted when
+ * absent:
+ *
+ * connection to server ... failed: errctx: errbuf (libcurl: curl_err)
+ */
+ const char *errctx; /* not freed; must point to static allocation */
+ PQExpBufferData errbuf;
+ char curl_err[CURL_ERROR_SIZE];
+
+ /*
+ * These documents need to survive over multiple calls, and are therefore
+ * cached directly in the async_ctx.
+ */
+ struct provider provider;
+ struct device_authz authz;
+
+ int running; /* is asynchronous work in progress? */
+ bool user_prompted; /* have we already sent the authz prompt? */
+ bool used_basic_auth; /* did we send a client secret? */
+ bool debugging; /* can we give unsafe developer assistance? */
+};
+
+/*
+ * Tears down the Curl handles and frees the async_ctx.
+ */
+static void
+free_async_ctx(PGconn *conn, struct async_ctx *actx)
+{
+ /*
+ * In general, none of the error cases below should ever happen if we have
+ * no bugs above. But if we do hit them, surfacing those errors somehow
+ * might be the only way to have a chance to debug them.
+ *
+ * TODO: At some point it'd be nice to have a standard way to warn about
+ * teardown failures. Appending to the connection's error message only
+ * helps if the bug caused a connection failure; otherwise it'll be
+ * buried...
+ */
+
+ if (actx->curlm && actx->curl)
+ {
+ CURLMcode err = curl_multi_remove_handle(actx->curlm, actx->curl);
+
+ if (err)
+ libpq_append_conn_error(conn,
+ "libcurl easy handle removal failed: %s",
+ curl_multi_strerror(err));
+ }
+
+ if (actx->curl)
+ {
+ /*
+ * curl_multi_cleanup() doesn't free any associated easy handles; we
+ * need to do that separately. We only ever have one easy handle per
+ * multi handle.
+ */
+ curl_easy_cleanup(actx->curl);
+ }
+
+ if (actx->curlm)
+ {
+ CURLMcode err = curl_multi_cleanup(actx->curlm);
+
+ if (err)
+ libpq_append_conn_error(conn,
+ "libcurl multi handle cleanup failed: %s",
+ curl_multi_strerror(err));
+ }
+
+ free_provider(&actx->provider);
+ free_device_authz(&actx->authz);
+
+ curl_slist_free_all(actx->headers);
+ termPQExpBuffer(&actx->work_data);
+ termPQExpBuffer(&actx->errbuf);
+
+ if (actx->mux != PGINVALID_SOCKET)
+ close(actx->mux);
+ if (actx->timerfd >= 0)
+ close(actx->timerfd);
+
+ free(actx);
+}
+
+/*
+ * Release resources used for the asynchronous exchange and disconnect the
+ * altsock.
+ *
+ * This is called either at the end of a successful authentication, or during
+ * pqDropConnection(), so we won't leak resources even if PQconnectPoll() never
+ * calls us back.
+ */
+void
+pg_fe_cleanup_oauth_flow(PGconn *conn)
+{
+ fe_oauth_state *state = conn->sasl_state;
+
+ if (state->async_ctx)
+ {
+ free_async_ctx(conn, state->async_ctx);
+ state->async_ctx = NULL;
+ }
+
+ conn->altsock = PGINVALID_SOCKET;
+}
+
+/*
+ * Macros for manipulating actx->errbuf. actx_error() translates and formats a
+ * string for you; actx_error_str() appends a string directly without
+ * translation.
+ */
+
+#define actx_error(ACTX, FMT, ...) \
+ appendPQExpBuffer(&(ACTX)->errbuf, libpq_gettext(FMT), ##__VA_ARGS__)
+
+#define actx_error_str(ACTX, S) \
+ appendPQExpBufferStr(&(ACTX)->errbuf, S)
+
+/*
+ * Macros for getting and setting state for the connection's two libcurl
+ * handles, so you don't have to write out the error handling every time.
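+ *
+ * For example, CHECK_SETOPT(actx, CURLOPT_URL, uri, return false) sets the
+ * request URL and, if libcurl reports a failure, records an error message in
+ * actx->errbuf before returning false from the calling function.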
+ */
+
+#define CHECK_MSETOPT(ACTX, OPT, VAL, FAILACTION) \
+ do { \
+ struct async_ctx *_actx = (ACTX); \
+ CURLMcode _setopterr = curl_multi_setopt(_actx->curlm, OPT, VAL); \
+ if (_setopterr) { \
+ actx_error(_actx, "failed to set %s on OAuth connection: %s",\
+ #OPT, curl_multi_strerror(_setopterr)); \
+ FAILACTION; \
+ } \
+ } while (0)
+
+#define CHECK_SETOPT(ACTX, OPT, VAL, FAILACTION) \
+ do { \
+ struct async_ctx *_actx = (ACTX); \
+ CURLcode _setopterr = curl_easy_setopt(_actx->curl, OPT, VAL); \
+ if (_setopterr) { \
+ actx_error(_actx, "failed to set %s on OAuth connection: %s",\
+ #OPT, curl_easy_strerror(_setopterr)); \
+ FAILACTION; \
+ } \
+ } while (0)
+
+#define CHECK_GETINFO(ACTX, INFO, OUT, FAILACTION) \
+ do { \
+ struct async_ctx *_actx = (ACTX); \
+ CURLcode _getinfoerr = curl_easy_getinfo(_actx->curl, INFO, OUT); \
+ if (_getinfoerr) { \
+ actx_error(_actx, "failed to get %s from OAuth response: %s",\
+ #INFO, curl_easy_strerror(_getinfoerr)); \
+ FAILACTION; \
+ } \
+ } while (0)
+
+/*
+ * General JSON Parsing for OAuth Responses
+ */
+
+/*
+ * Represents a single name/value pair in a JSON object. This is the primary
+ * interface to parse_oauth_json().
+ *
+ * All fields are stored internally as strings or lists of strings, so clients
+ * have to explicitly parse other scalar types (though they will have gone
+ * through basic lexical validation). Storing nested objects is not currently
+ * supported, nor is parsing arrays of anything other than strings.
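+ *
+ * As an example of the intended usage, parse_provider() below declares its
+ * expected document with entries like
+ *
+ *     {"issuer", JSON_TOKEN_STRING, {&provider->issuer}, REQUIRED},
+ *
+ * and passes the array to parse_oauth_json().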
+ */
+struct json_field
+{
+ const char *name; /* name (key) of the member */
+
+ JsonTokenType type; /* currently supports JSON_TOKEN_STRING,
+ * JSON_TOKEN_NUMBER, and
+ * JSON_TOKEN_ARRAY_START */
+ union
+ {
+ char **scalar; /* for all scalar types */
+ struct curl_slist **array; /* for type == JSON_TOKEN_ARRAY_START */
+ } target;
+
+ bool required; /* REQUIRED field, or just OPTIONAL? */
+};
+
+/* Documentation macros for json_field.required. */
+#define REQUIRED true
+#define OPTIONAL false
+
+/* Parse state for parse_oauth_json(). */
+struct oauth_parse
+{
+ PQExpBuffer errbuf; /* detail message for JSON_SEM_ACTION_FAILED */
+ int nested; /* nesting level (zero is the top) */
+
+ const struct json_field *fields; /* field definition array */
+ const struct json_field *active; /* points inside the fields array */
+};
+
+#define oauth_parse_set_error(ctx, fmt, ...) \
+ appendPQExpBuffer((ctx)->errbuf, libpq_gettext(fmt), ##__VA_ARGS__)
+
+static void
+report_type_mismatch(struct oauth_parse *ctx)
+{
+ char *msgfmt;
+
+ Assert(ctx->active);
+
+ /*
+ * At the moment, the only fields we're interested in are strings,
+ * numbers, and arrays of strings.
+ */
+ switch (ctx->active->type)
+ {
+ case JSON_TOKEN_STRING:
+ msgfmt = "field \"%s\" must be a string";
+ break;
+
+ case JSON_TOKEN_NUMBER:
+ msgfmt = "field \"%s\" must be a number";
+ break;
+
+ case JSON_TOKEN_ARRAY_START:
+ msgfmt = "field \"%s\" must be an array of strings";
+ break;
+
+ default:
+ Assert(false);
+ msgfmt = "field \"%s\" has unexpected type";
+ }
+
+ oauth_parse_set_error(ctx, msgfmt, ctx->active->name);
+}
+
+static JsonParseErrorType
+oauth_json_object_start(void *state)
+{
+ struct oauth_parse *ctx = state;
+
+ if (ctx->active)
+ {
+ /*
+ * Currently, none of the fields we're interested in can be or contain
+ * objects, so we can reject this case outright.
+ */
+ report_type_mismatch(ctx);
+ return JSON_SEM_ACTION_FAILED;
+ }
+
+ ++ctx->nested;
+ return JSON_SUCCESS;
+}
+
+static JsonParseErrorType
+oauth_json_object_field_start(void *state, char *name, bool isnull)
+{
+ struct oauth_parse *ctx = state;
+
+ /* We care only about the top-level fields. */
+ if (ctx->nested == 1)
+ {
+ const struct json_field *field = ctx->fields;
+
+ /*
+ * We should never start parsing a new field while a previous one is
+ * still active.
+ */
+ if (ctx->active)
+ {
+ Assert(false);
+ oauth_parse_set_error(ctx,
+ "internal error: started field '%s' before field '%s' was finished",
+ name, ctx->active->name);
+ return JSON_SEM_ACTION_FAILED;
+ }
+
+ while (field->name)
+ {
+ if (strcmp(name, field->name) == 0)
+ {
+ ctx->active = field;
+ break;
+ }
+
+ ++field;
+ }
+
+ /*
+ * We don't allow duplicate field names; error out if the target has
+ * already been set.
+ */
+ if (ctx->active)
+ {
+ field = ctx->active;
+
+ if ((field->type == JSON_TOKEN_ARRAY_START && *field->target.array)
+ || (field->type != JSON_TOKEN_ARRAY_START && *field->target.scalar))
+ {
+ oauth_parse_set_error(ctx, "field \"%s\" is duplicated",
+ field->name);
+ return JSON_SEM_ACTION_FAILED;
+ }
+ }
+ }
+
+ return JSON_SUCCESS;
+}
+
+static JsonParseErrorType
+oauth_json_object_end(void *state)
+{
+ struct oauth_parse *ctx = state;
+
+ --ctx->nested;
+
+ /*
+ * All fields should be fully processed by the end of the top-level
+ * object.
+ */
+ if (!ctx->nested && ctx->active)
+ {
+ Assert(false);
+ oauth_parse_set_error(ctx,
+ "internal error: field '%s' still active at end of object",
+ ctx->active->name);
+ return JSON_SEM_ACTION_FAILED;
+ }
+
+ return JSON_SUCCESS;
+}
+
+static JsonParseErrorType
+oauth_json_array_start(void *state)
+{
+ struct oauth_parse *ctx = state;
+
+ if (!ctx->nested)
+ {
+ oauth_parse_set_error(ctx, "top-level element must be an object");
+ return JSON_SEM_ACTION_FAILED;
+ }
+
+ if (ctx->active)
+ {
+ if (ctx->active->type != JSON_TOKEN_ARRAY_START
+ /* The arrays we care about must not have arrays as values. */
+ || ctx->nested > 1)
+ {
+ report_type_mismatch(ctx);
+ return JSON_SEM_ACTION_FAILED;
+ }
+ }
+
+ ++ctx->nested;
+ return JSON_SUCCESS;
+}
+
+static JsonParseErrorType
+oauth_json_array_end(void *state)
+{
+ struct oauth_parse *ctx = state;
+
+ if (ctx->active)
+ {
+ /*
+ * Clear the target (which should be an array inside the top-level
+ * object). For this to be safe, no target arrays can contain other
+ * arrays; we check for that in the array_start callback.
+ */
+ if (ctx->nested != 2 || ctx->active->type != JSON_TOKEN_ARRAY_START)
+ {
+ Assert(false);
+ oauth_parse_set_error(ctx,
+ "internal error: found unexpected array end while parsing field '%s'",
+ ctx->active->name);
+ return JSON_SEM_ACTION_FAILED;
+ }
+
+ ctx->active = NULL;
+ }
+
+ --ctx->nested;
+ return JSON_SUCCESS;
+}
+
+static JsonParseErrorType
+oauth_json_scalar(void *state, char *token, JsonTokenType type)
+{
+ struct oauth_parse *ctx = state;
+
+ if (!ctx->nested)
+ {
+ oauth_parse_set_error(ctx, "top-level element must be an object");
+ return JSON_SEM_ACTION_FAILED;
+ }
+
+ if (ctx->active)
+ {
+ const struct json_field *field = ctx->active;
+ JsonTokenType expected = field->type;
+
+ /* Make sure this matches what the active field expects. */
+ if (expected == JSON_TOKEN_ARRAY_START)
+ {
+ /* Are we actually inside an array? */
+ if (ctx->nested < 2)
+ {
+ report_type_mismatch(ctx);
+ return JSON_SEM_ACTION_FAILED;
+ }
+
+ /* Currently, arrays can only contain strings. */
+ expected = JSON_TOKEN_STRING;
+ }
+
+ if (type != expected)
+ {
+ report_type_mismatch(ctx);
+ return JSON_SEM_ACTION_FAILED;
+ }
+
+ if (field->type != JSON_TOKEN_ARRAY_START)
+ {
+ /* Ensure that we're parsing the top-level keys... */
+ if (ctx->nested != 1)
+ {
+ Assert(false);
+ oauth_parse_set_error(ctx,
+ "internal error: scalar target found at nesting level %d",
+ ctx->nested);
+ return JSON_SEM_ACTION_FAILED;
+ }
+
+ /* ...and that a result has not already been set. */
+ if (*field->target.scalar)
+ {
+ Assert(false);
+ oauth_parse_set_error(ctx,
+ "internal error: scalar field '%s' would be assigned twice",
+ ctx->active->name);
+ return JSON_SEM_ACTION_FAILED;
+ }
+
+ *field->target.scalar = strdup(token);
+ if (!*field->target.scalar)
+ return JSON_OUT_OF_MEMORY;
+
+ ctx->active = NULL;
+
+ return JSON_SUCCESS;
+ }
+ else
+ {
+ struct curl_slist *temp;
+
+ /* The target array should be inside the top-level object. */
+ if (ctx->nested != 2)
+ {
+ Assert(false);
+ oauth_parse_set_error(ctx,
+ "internal error: array member found at nesting level %d",
+ ctx->nested);
+ return JSON_SEM_ACTION_FAILED;
+ }
+
+ /* Note that curl_slist_append() makes a copy of the token. */
+ temp = curl_slist_append(*field->target.array, token);
+ if (!temp)
+ return JSON_OUT_OF_MEMORY;
+
+ *field->target.array = temp;
+ }
+ }
+ else
+ {
+ /* otherwise we just ignore it */
+ }
+
+ return JSON_SUCCESS;
+}
+
+/*
+ * Checks the Content-Type header against the expected type. Parameters are
+ * allowed but ignored.
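+ *
+ * For example, both "application/json" and "application/json; charset=utf-8"
+ * match an expected type of "application/json".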
+ */
+static bool
+check_content_type(struct async_ctx *actx, const char *type)
+{
+ const size_t type_len = strlen(type);
+ char *content_type;
+
+ CHECK_GETINFO(actx, CURLINFO_CONTENT_TYPE, &content_type, return false);
+
+ if (!content_type)
+ {
+ actx_error(actx, "no content type was provided");
+ return false;
+ }
+
+ /*
+	 * Perform a length-limited comparison of the prefix only, rather than
+	 * comparing the whole string; media type parameters may follow it.
+ */
+ if (pg_strncasecmp(content_type, type, type_len) != 0)
+ goto fail;
+
+ /* On an exact match, we're done. */
+ Assert(strlen(content_type) >= type_len);
+ if (content_type[type_len] == '\0')
+ return true;
+
+ /*
+ * Only a semicolon (optionally preceded by HTTP optional whitespace) is
+ * acceptable after the prefix we checked. This marks the start of media
+ * type parameters, which we currently have no use for.
+ */
+ for (size_t i = type_len; content_type[i]; ++i)
+ {
+ switch (content_type[i])
+ {
+ case ';':
+ return true; /* success! */
+
+ case ' ':
+ case '\t':
+ /* HTTP optional whitespace allows only spaces and htabs. */
+ break;
+
+ default:
+ goto fail;
+ }
+ }
+
+fail:
+ actx_error(actx, "unexpected content type: \"%s\"", content_type);
+ return false;
+}
+
+/*
+ * A helper function for general JSON parsing. fields is the array of field
+ * definitions with their backing pointers. The response will be parsed from
+ * actx->curl and actx->work_data (as set up by start_request()), and any
+ * parsing errors will be placed into actx->errbuf.
+ */
+static bool
+parse_oauth_json(struct async_ctx *actx, const struct json_field *fields)
+{
+ PQExpBuffer resp = &actx->work_data;
+ JsonLexContext lex = {0};
+ JsonSemAction sem = {0};
+ JsonParseErrorType err;
+ struct oauth_parse ctx = {0};
+ bool success = false;
+
+ if (!check_content_type(actx, "application/json"))
+ return false;
+
+ if (strlen(resp->data) != resp->len)
+ {
+ actx_error(actx, "response contains embedded NULLs");
+ return false;
+ }
+
+ /*
+ * pg_parse_json doesn't validate the incoming UTF-8, so we have to check
+ * that up front.
+ */
+ if (pg_encoding_verifymbstr(PG_UTF8, resp->data, resp->len) != resp->len)
+ {
+ actx_error(actx, "response is not valid UTF-8");
+ return false;
+ }
+
+ makeJsonLexContextCstringLen(&lex, resp->data, resp->len, PG_UTF8, true);
+ setJsonLexContextOwnsTokens(&lex, true); /* must not leak on error */
+
+ ctx.errbuf = &actx->errbuf;
+ ctx.fields = fields;
+ sem.semstate = &ctx;
+
+ sem.object_start = oauth_json_object_start;
+ sem.object_field_start = oauth_json_object_field_start;
+ sem.object_end = oauth_json_object_end;
+ sem.array_start = oauth_json_array_start;
+ sem.array_end = oauth_json_array_end;
+ sem.scalar = oauth_json_scalar;
+
+ err = pg_parse_json(&lex, &sem);
+
+ if (err != JSON_SUCCESS)
+ {
+ /*
+ * For JSON_SEM_ACTION_FAILED, we've already written the error
+ * message. Other errors come directly from pg_parse_json(), already
+ * translated.
+ */
+ if (err != JSON_SEM_ACTION_FAILED)
+ actx_error_str(actx, json_errdetail(err, &lex));
+
+ goto cleanup;
+ }
+
+ /* Check all required fields. */
+ while (fields->name)
+ {
+ if (fields->required
+ && !*fields->target.scalar
+ && !*fields->target.array)
+ {
+ actx_error(actx, "field \"%s\" is missing", fields->name);
+ goto cleanup;
+ }
+
+ fields++;
+ }
+
+ success = true;
+
+cleanup:
+ freeJsonLexContext(&lex);
+ return success;
+}
+
+/*
+ * JSON Parser Definitions
+ */
+
+/*
+ * Parses authorization server metadata. Fields are defined by OIDC Discovery
+ * 1.0 and RFC 8414.
+ */
+static bool
+parse_provider(struct async_ctx *actx, struct provider *provider)
+{
+ struct json_field fields[] = {
+ {"issuer", JSON_TOKEN_STRING, {&provider->issuer}, REQUIRED},
+ {"token_endpoint", JSON_TOKEN_STRING, {&provider->token_endpoint}, REQUIRED},
+
+ /*----
+ * The following fields are technically REQUIRED, but we don't use
+ * them anywhere yet:
+ *
+ * - jwks_uri
+ * - response_types_supported
+ * - subject_types_supported
+ * - id_token_signing_alg_values_supported
+ */
+
+ {"device_authorization_endpoint", JSON_TOKEN_STRING, {&provider->device_authorization_endpoint}, OPTIONAL},
+ {"grant_types_supported", JSON_TOKEN_ARRAY_START, {.array = &provider->grant_types_supported}, OPTIONAL},
+
+ {0},
+ };
+
+ return parse_oauth_json(actx, fields);
+}
+
+/*
+ * Parses a valid JSON number into a double. The input must have come from
+ * pg_parse_json(), so that we know the lexer has validated it; there's no
+ * in-band signal for invalid formats.
+ */
+static double
+parse_json_number(const char *s)
+{
+ double parsed;
+ int cnt;
+
+ /*
+ * The JSON lexer has already validated the number, which is stricter than
+ * the %f format, so we should be good to use sscanf().
+ */
+ cnt = sscanf(s, "%lf", &parsed);
+
+ if (cnt != 1)
+ {
+ /*
+ * Either the lexer screwed up or our assumption above isn't true, and
+ * either way a developer needs to take a look.
+ */
+ Assert(false);
+ return 0;
+ }
+
+ return parsed;
+}
+
+/*
+ * Parses the "interval" JSON number, corresponding to the number of seconds to
+ * wait between token endpoint requests.
+ *
+ * RFC 8628 is pretty silent on sanity checks for the interval. As a matter of
+ * practicality, round any fractional intervals up to the next second, and clamp
+ * the result at a minimum of one. (Zero-second intervals would result in an
+ * expensive network polling loop.) Tests may remove the lower bound with
+ * PGOAUTHDEBUG, for improved performance.
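+ *
+ * For example, an interval of 0.5 becomes 1 second, and an interval of 0
+ * becomes 1 second as well (or 0, if PGOAUTHDEBUG has removed the lower
+ * bound).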
+ */
+static int
+parse_interval(struct async_ctx *actx, const char *interval_str)
+{
+ double parsed;
+
+ parsed = parse_json_number(interval_str);
+ parsed = ceil(parsed);
+
+	if (parsed < 1)
+		return actx->debugging ? 0 : 1;
+	else if (parsed >= INT_MAX)
+		return INT_MAX;
+
+ return parsed;
+}
+
+/*
+ * Parses the "expires_in" JSON number, corresponding to the number of seconds
+ * remaining in the lifetime of the device code request.
+ *
+ * Similar to parse_interval, but we have even fewer requirements for reasonable
+ * values since we don't use the expiration time directly (it's passed to the
+ * PQAUTHDATA_PROMPT_OAUTH_DEVICE hook, in case the application wants to do
+ * something with it). We simply round down and clamp to int range.
+ */
+static int
+parse_expires_in(struct async_ctx *actx, const char *expires_in_str)
+{
+ double parsed;
+
+ parsed = parse_json_number(expires_in_str);
+ parsed = floor(parsed);
+
+ if (parsed >= INT_MAX)
+ return INT_MAX;
+ else if (parsed <= INT_MIN)
+ return INT_MIN;
+
+ return parsed;
+}
+
+/*
+ * Parses the Device Authorization Response (RFC 8628, Sec. 3.2).
+ */
+static bool
+parse_device_authz(struct async_ctx *actx, struct device_authz *authz)
+{
+ struct json_field fields[] = {
+ {"device_code", JSON_TOKEN_STRING, {&authz->device_code}, REQUIRED},
+ {"user_code", JSON_TOKEN_STRING, {&authz->user_code}, REQUIRED},
+ {"verification_uri", JSON_TOKEN_STRING, {&authz->verification_uri}, REQUIRED},
+ {"expires_in", JSON_TOKEN_NUMBER, {&authz->expires_in_str}, REQUIRED},
+
+ /*
+ * Some services (Google, Azure) spell verification_uri differently.
+ * We accept either.
+ */
+ {"verification_url", JSON_TOKEN_STRING, {&authz->verification_uri}, REQUIRED},
+
+ /*
+		 * There is no evidence of any service provider spelling
+		 * verification_uri_complete with "url" instead, so we only accept
+		 * the "uri" form here.
+ */
+ {"verification_uri_complete", JSON_TOKEN_STRING, {&authz->verification_uri_complete}, OPTIONAL},
+ {"interval", JSON_TOKEN_NUMBER, {&authz->interval_str}, OPTIONAL},
+
+ {0},
+ };
+
+ if (!parse_oauth_json(actx, fields))
+ return false;
+
+ /*
+ * Parse our numeric fields. Lexing has already completed by this time, so
+ * we at least know they're valid JSON numbers.
+ */
+ if (authz->interval_str)
+ authz->interval = parse_interval(actx, authz->interval_str);
+ else
+ {
+ /*
+ * RFC 8628 specifies 5 seconds as the default value if the server
+ * doesn't provide an interval.
+ */
+ authz->interval = 5;
+ }
+
+ Assert(authz->expires_in_str); /* ensured by parse_oauth_json() */
+ authz->expires_in = parse_expires_in(actx, authz->expires_in_str);
+
+ return true;
+}
+
+/*
+ * Parses the device access token error response (RFC 8628, Sec. 3.5, which
+ * uses the error response defined in RFC 6749, Sec. 5.2).
+ */
+static bool
+parse_token_error(struct async_ctx *actx, struct token_error *err)
+{
+ bool result;
+ struct json_field fields[] = {
+ {"error", JSON_TOKEN_STRING, {&err->error}, REQUIRED},
+
+ {"error_description", JSON_TOKEN_STRING, {&err->error_description}, OPTIONAL},
+
+ {0},
+ };
+
+ result = parse_oauth_json(actx, fields);
+
+ /*
+ * Since token errors are parsed during other active error paths, only
+ * override the errctx if parsing explicitly fails.
+ */
+ if (!result)
+ actx->errctx = "failed to parse token error response";
+
+ return result;
+}
+
+/*
+ * Constructs a message from the token error response and puts it into
+ * actx->errbuf.
+ */
+static void
+record_token_error(struct async_ctx *actx, const struct token_error *err)
+{
+ if (err->error_description)
+ appendPQExpBuffer(&actx->errbuf, "%s ", err->error_description);
+ else
+ {
+ /*
+ * Try to get some more helpful detail into the error string. A 401
+ * status in particular implies that the oauth_client_secret is
+ * missing or wrong.
+ */
+ long response_code;
+
+ CHECK_GETINFO(actx, CURLINFO_RESPONSE_CODE, &response_code, response_code = 0);
+
+ if (response_code == 401)
+ {
+ actx_error(actx, actx->used_basic_auth
+ ? "provider rejected the oauth_client_secret"
+ : "provider requires client authentication, and no oauth_client_secret is set");
+ actx_error_str(actx, " ");
+ }
+ }
+
+ appendPQExpBuffer(&actx->errbuf, "(%s)", err->error);
+}
+
+/*
+ * Parses the device access token response (RFC 8628, Sec. 3.5, which uses the
+ * success response defined in RFC 6749, Sec. 5.1).
+ */
+static bool
+parse_access_token(struct async_ctx *actx, struct token *tok)
+{
+ struct json_field fields[] = {
+ {"access_token", JSON_TOKEN_STRING, {&tok->access_token}, REQUIRED},
+ {"token_type", JSON_TOKEN_STRING, {&tok->token_type}, REQUIRED},
+
+ /*---
+ * We currently have no use for the following OPTIONAL fields:
+ *
+ * - expires_in: This will be important for maintaining a token cache,
+ * but we do not yet implement one.
+ *
+ * - refresh_token: Ditto.
+ *
+ * - scope: This is only sent when the authorization server sees fit to
+ * change our scope request. It's not clear what we should do
+ * about this; either it's been done as a matter of policy, or
+ * the user has explicitly denied part of the authorization,
+ * and either way the server-side validator is in a better
+ * place to complain if the change isn't acceptable.
+ */
+
+ {0},
+ };
+
+ return parse_oauth_json(actx, fields);
+}
+
+/*
+ * libcurl Multi Setup/Callbacks
+ */
+
+/*
+ * Sets up the actx->mux, which is the altsock that PQconnectPoll clients will
+ * select() on instead of the Postgres socket during OAuth negotiation.
+ *
+ * This is just an epoll set or kqueue abstracting multiple other descriptors.
+ * For epoll, the timerfd is always part of the set; it's just disabled when
+ * we're not using it. For kqueue, the "timerfd" is actually a second kqueue
+ * instance which is only added to the set when needed.
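+ *
+ * Either way, the multiplexer becomes readable whenever one of the
+ * descriptors registered by libcurl (or the timer) is ready, so callers only
+ * ever have to wait on the single altsock.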
+ */
+static bool
+setup_multiplexer(struct async_ctx *actx)
+{
+#ifdef HAVE_SYS_EPOLL_H
+ struct epoll_event ev = {.events = EPOLLIN};
+
+ actx->mux = epoll_create1(EPOLL_CLOEXEC);
+ if (actx->mux < 0)
+ {
+ actx_error(actx, "failed to create epoll set: %m");
+ return false;
+ }
+
+ actx->timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC);
+ if (actx->timerfd < 0)
+ {
+ actx_error(actx, "failed to create timerfd: %m");
+ return false;
+ }
+
+ if (epoll_ctl(actx->mux, EPOLL_CTL_ADD, actx->timerfd, &ev) < 0)
+ {
+ actx_error(actx, "failed to add timerfd to epoll set: %m");
+ return false;
+ }
+
+ return true;
+#endif
+#ifdef HAVE_SYS_EVENT_H
+ actx->mux = kqueue();
+ if (actx->mux < 0)
+ {
+ /*- translator: the term "kqueue" (kernel queue) should not be translated */
+ actx_error(actx, "failed to create kqueue: %m");
+ return false;
+ }
+
+ /*
+ * Originally, we set EVFILT_TIMER directly on the top-level multiplexer.
+ * This makes it difficult to implement timer_expired(), though, so now we
+ * set EVFILT_TIMER on a separate actx->timerfd, which is chained to
+ * actx->mux while the timer is active.
+ */
+ actx->timerfd = kqueue();
+ if (actx->timerfd < 0)
+ {
+ actx_error(actx, "failed to create timer kqueue: %m");
+ return false;
+ }
+
+ return true;
+#endif
+
+ actx_error(actx, "libpq does not support the Device Authorization flow on this platform");
+ return false;
+}
+
+/*
+ * Adds and removes sockets from the multiplexer set, as directed by the
+ * libcurl multi handle.
+ */
+static int
+register_socket(CURL *curl, curl_socket_t socket, int what, void *ctx,
+ void *socketp)
+{
+#ifdef HAVE_SYS_EPOLL_H
+ struct async_ctx *actx = ctx;
+ struct epoll_event ev = {0};
+ int res;
+ int op = EPOLL_CTL_ADD;
+
+ switch (what)
+ {
+ case CURL_POLL_IN:
+ ev.events = EPOLLIN;
+ break;
+
+ case CURL_POLL_OUT:
+ ev.events = EPOLLOUT;
+ break;
+
+ case CURL_POLL_INOUT:
+ ev.events = EPOLLIN | EPOLLOUT;
+ break;
+
+ case CURL_POLL_REMOVE:
+ op = EPOLL_CTL_DEL;
+ break;
+
+ default:
+ actx_error(actx, "unknown libcurl socket operation: %d", what);
+ return -1;
+ }
+
+ res = epoll_ctl(actx->mux, op, socket, &ev);
+ if (res < 0 && errno == EEXIST)
+ {
+ /* We already had this socket in the pollset. */
+ op = EPOLL_CTL_MOD;
+ res = epoll_ctl(actx->mux, op, socket, &ev);
+ }
+
+ if (res < 0)
+ {
+ switch (op)
+ {
+ case EPOLL_CTL_ADD:
+ actx_error(actx, "could not add to epoll set: %m");
+ break;
+
+ case EPOLL_CTL_DEL:
+ actx_error(actx, "could not delete from epoll set: %m");
+ break;
+
+ default:
+ actx_error(actx, "could not update epoll set: %m");
+ }
+
+ return -1;
+ }
+
+ return 0;
+#endif
+#ifdef HAVE_SYS_EVENT_H
+ struct async_ctx *actx = ctx;
+ struct kevent ev[2] = {{0}};
+ struct kevent ev_out[2];
+ struct timespec timeout = {0};
+ int nev = 0;
+ int res;
+
+ switch (what)
+ {
+ case CURL_POLL_IN:
+ EV_SET(&ev[nev], socket, EVFILT_READ, EV_ADD | EV_RECEIPT, 0, 0, 0);
+ nev++;
+ break;
+
+ case CURL_POLL_OUT:
+ EV_SET(&ev[nev], socket, EVFILT_WRITE, EV_ADD | EV_RECEIPT, 0, 0, 0);
+ nev++;
+ break;
+
+ case CURL_POLL_INOUT:
+ EV_SET(&ev[nev], socket, EVFILT_READ, EV_ADD | EV_RECEIPT, 0, 0, 0);
+ nev++;
+ EV_SET(&ev[nev], socket, EVFILT_WRITE, EV_ADD | EV_RECEIPT, 0, 0, 0);
+ nev++;
+ break;
+
+ case CURL_POLL_REMOVE:
+
+ /*
+ * We don't know which of these is currently registered, perhaps
+ * both, so we try to remove both. This means we need to tolerate
+ * ENOENT below.
+ */
+ EV_SET(&ev[nev], socket, EVFILT_READ, EV_DELETE | EV_RECEIPT, 0, 0, 0);
+ nev++;
+ EV_SET(&ev[nev], socket, EVFILT_WRITE, EV_DELETE | EV_RECEIPT, 0, 0, 0);
+ nev++;
+ break;
+
+ default:
+ actx_error(actx, "unknown libcurl socket operation: %d", what);
+ return -1;
+ }
+
+ res = kevent(actx->mux, ev, nev, ev_out, lengthof(ev_out), &timeout);
+ if (res < 0)
+ {
+ actx_error(actx, "could not modify kqueue: %m");
+ return -1;
+ }
+
+ /*
+ * We can't use the simple errno version of kevent, because we need to
+ * skip over ENOENT while still allowing a second change to be processed.
+ * So we need a longer-form error checking loop.
+ */
+ for (int i = 0; i < res; ++i)
+ {
+ /*
+ * EV_RECEIPT should guarantee one EV_ERROR result for every change,
+ * whether successful or not. Failed entries contain a non-zero errno
+ * in the data field.
+ */
+ Assert(ev_out[i].flags & EV_ERROR);
+
+ errno = ev_out[i].data;
+ if (errno && errno != ENOENT)
+ {
+ switch (what)
+ {
+ case CURL_POLL_REMOVE:
+ actx_error(actx, "could not delete from kqueue: %m");
+ break;
+ default:
+ actx_error(actx, "could not add to kqueue: %m");
+ }
+ return -1;
+ }
+ }
+
+ return 0;
+#endif
+
+ actx_error(actx, "libpq does not support multiplexer sockets on this platform");
+ return -1;
+}
+
+/*
+ * Enables or disables the timer in the multiplexer set. The timeout value is
+ * in milliseconds (negative values disable the timer).
+ *
+ * For epoll, rather than continually adding and removing the timer, we keep it
+ * in the set at all times and just disarm it when it's not needed. For kqueue,
+ * the timer is removed completely when disabled to prevent stale timeouts from
+ * remaining in the queue.
+ */
+static bool
+set_timer(struct async_ctx *actx, long timeout)
+{
+#ifdef HAVE_SYS_EPOLL_H
+ struct itimerspec spec = {0};
+
+ if (timeout < 0)
+ {
+ /* the zero itimerspec will disarm the timer below */
+ }
+ else if (timeout == 0)
+ {
+ /*
+ * A zero timeout means libcurl wants us to call back immediately.
+ * That's not technically an option for timerfd, but we can make the
+ * timeout ridiculously short.
+ */
+ spec.it_value.tv_nsec = 1;
+ }
+ else
+ {
+ spec.it_value.tv_sec = timeout / 1000;
+ spec.it_value.tv_nsec = (timeout % 1000) * 1000000;
+ }
+
+ if (timerfd_settime(actx->timerfd, 0 /* no flags */ , &spec, NULL) < 0)
+ {
+ actx_error(actx, "setting timerfd to %ld: %m", timeout);
+ return false;
+ }
+
+ return true;
+#endif
+#ifdef HAVE_SYS_EVENT_H
+ struct kevent ev;
+
+ /* Enable/disable the timer itself. */
+ EV_SET(&ev, 1, EVFILT_TIMER, timeout < 0 ? EV_DELETE : (EV_ADD | EV_ONESHOT),
+ 0, timeout, 0);
+ if (kevent(actx->timerfd, &ev, 1, NULL, 0, NULL) < 0 && errno != ENOENT)
+ {
+ actx_error(actx, "setting kqueue timer to %ld: %m", timeout);
+ return false;
+ }
+
+ /*
+ * Add/remove the timer to/from the mux. (In contrast with epoll, if we
+ * allowed the timer to remain registered here after being disabled, the
+ * mux queue would retain any previous stale timeout notifications and
+ * remain readable.)
+ */
+ EV_SET(&ev, actx->timerfd, EVFILT_READ, timeout < 0 ? EV_DELETE : EV_ADD,
+ 0, 0, 0);
+ if (kevent(actx->mux, &ev, 1, NULL, 0, NULL) < 0 && errno != ENOENT)
+ {
+ actx_error(actx, "could not update timer on kqueue: %m");
+ return false;
+ }
+
+ return true;
+#endif
+
+ actx_error(actx, "libpq does not support timers on this platform");
+ return false;
+}
+
+/*
+ * Returns 1 if the timeout in the multiplexer set has expired since the last
+ * call to set_timer(), 0 if the timer is still running, or -1 (with an
+ * actx_error() report) if the timer cannot be queried.
+ */
+static int
+timer_expired(struct async_ctx *actx)
+{
+#ifdef HAVE_SYS_EPOLL_H
+ struct itimerspec spec = {0};
+
+ if (timerfd_gettime(actx->timerfd, &spec) < 0)
+ {
+ actx_error(actx, "getting timerfd value: %m");
+ return -1;
+ }
+
+ /*
+ * This implementation assumes we're using single-shot timers. If you
+ * change to using intervals, you'll need to reimplement this function
+ * too, possibly with the read() or select() interfaces for timerfd.
+ */
+ Assert(spec.it_interval.tv_sec == 0
+ && spec.it_interval.tv_nsec == 0);
+
+ /* If the remaining time to expiration is zero, we're done. */
+ return (spec.it_value.tv_sec == 0
+ && spec.it_value.tv_nsec == 0);
+#endif
+#ifdef HAVE_SYS_EVENT_H
+ int res;
+
+ /* Is the timer queue ready? */
+ res = PQsocketPoll(actx->timerfd, 1 /* forRead */ , 0, 0);
+ if (res < 0)
+ {
+ actx_error(actx, "checking kqueue for timeout: %m");
+ return -1;
+ }
+
+ return (res > 0);
+#endif
+
+ actx_error(actx, "libpq does not support timers on this platform");
+ return -1;
+}
+
+/*
+ * Adds or removes timeouts from the multiplexer set, as directed by the
+ * libcurl multi handle.
+ */
+static int
+register_timer(CURLM *curlm, long timeout, void *ctx)
+{
+ struct async_ctx *actx = ctx;
+
+ /*
+ * There might be an optimization opportunity here: if timeout == 0, we
+ * could signal drive_request to immediately call
+ * curl_multi_socket_action, rather than returning all the way up the
+ * stack only to come right back. But it's not clear that the additional
+ * code complexity is worth it.
+ */
+ if (!set_timer(actx, timeout))
+ return -1; /* actx_error already called */
+
+ return 0;
+}
+
+/*
+ * Prints Curl request debugging information to stderr.
+ *
+ * Note that this will expose a number of critical secrets, so users have to opt
+ * into this (see PGOAUTHDEBUG).
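+ *
+ * Each output line is prefixed with "[libcurl]" plus a direction marker,
+ * e.g. "[libcurl] > " for outgoing headers and data.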
+ */
+static int
+debug_callback(CURL *handle, curl_infotype type, char *data, size_t size,
+ void *clientp)
+{
+ const char *prefix;
+ bool printed_prefix = false;
+ PQExpBufferData buf;
+
+ /* Prefixes are modeled off of the default libcurl debug output. */
+ switch (type)
+ {
+ case CURLINFO_TEXT:
+ prefix = "*";
+ break;
+
+ case CURLINFO_HEADER_IN: /* fall through */
+ case CURLINFO_DATA_IN:
+ prefix = "<";
+ break;
+
+ case CURLINFO_HEADER_OUT: /* fall through */
+ case CURLINFO_DATA_OUT:
+ prefix = ">";
+ break;
+
+ default:
+ return 0;
+ }
+
+ initPQExpBuffer(&buf);
+
+ /*
+ * Split the output into lines for readability; sometimes multiple headers
+ * are included in a single call. We also don't allow unprintable ASCII
+ * through without a basic <XX> escape.
+ */
+ for (int i = 0; i < size; i++)
+ {
+ char c = data[i];
+
+ if (!printed_prefix)
+ {
+ appendPQExpBuffer(&buf, "[libcurl] %s ", prefix);
+ printed_prefix = true;
+ }
+
+ if (c >= 0x20 && c <= 0x7E)
+ appendPQExpBufferChar(&buf, c);
+ else if ((type == CURLINFO_HEADER_IN
+ || type == CURLINFO_HEADER_OUT
+ || type == CURLINFO_TEXT)
+ && (c == '\r' || c == '\n'))
+ {
+ /*
+ * Don't bother emitting <0D><0A> for headers and text; it's not
+ * helpful noise.
+ */
+ }
+ else
+			appendPQExpBuffer(&buf, "<%02X>", (unsigned char) c);
+
+ if (c == '\n')
+ {
+ appendPQExpBufferChar(&buf, c);
+ printed_prefix = false;
+ }
+ }
+
+ if (printed_prefix)
+ appendPQExpBufferChar(&buf, '\n'); /* finish the line */
+
+ fprintf(stderr, "%s", buf.data);
+ termPQExpBuffer(&buf);
+ return 0;
+}
+
+/*
+ * Initializes the two libcurl handles in the async_ctx. The multi handle,
+ * actx->curlm, is what drives the asynchronous engine and tells us what to do
+ * next. The easy handle, actx->curl, encapsulates the state for a single
+ * request/response. It's added to the multi handle as needed, during
+ * start_request().
+ */
+static bool
+setup_curl_handles(struct async_ctx *actx)
+{
+ /*
+ * Create our multi handle. This encapsulates the entire conversation with
+ * libcurl for this connection.
+ */
+ actx->curlm = curl_multi_init();
+ if (!actx->curlm)
+ {
+ /* We don't get a lot of feedback on the failure reason. */
+ actx_error(actx, "failed to create libcurl multi handle");
+ return false;
+ }
+
+ /*
+ * The multi handle tells us what to wait on using two callbacks. These
+ * will manipulate actx->mux as needed.
+ */
+ CHECK_MSETOPT(actx, CURLMOPT_SOCKETFUNCTION, register_socket, return false);
+ CHECK_MSETOPT(actx, CURLMOPT_SOCKETDATA, actx, return false);
+ CHECK_MSETOPT(actx, CURLMOPT_TIMERFUNCTION, register_timer, return false);
+ CHECK_MSETOPT(actx, CURLMOPT_TIMERDATA, actx, return false);
+
+ /*
+ * Set up an easy handle. All of our requests are made serially, so we
+ * only ever need to keep track of one.
+ */
+ actx->curl = curl_easy_init();
+ if (!actx->curl)
+ {
+ actx_error(actx, "failed to create libcurl handle");
+ return false;
+ }
+
+ /*
+ * Multi-threaded applications must set CURLOPT_NOSIGNAL. This requires us
+ * to handle the possibility of SIGPIPE ourselves using pq_block_sigpipe;
+ * see pg_fe_run_oauth_flow().
+ *
+ * NB: If libcurl is not built against a friendly DNS resolver (c-ares or
+ * threaded), setting this option prevents DNS lookups from timing out
+ * correctly. We warn about this situation at configure time.
+ *
+ * TODO: Perhaps there's a clever way to warn the user about synchronous
+ * DNS at runtime too? It's not immediately clear how to do that in a
+ * helpful way: for many standard single-threaded use cases, the user
+ * might not care at all, so spraying warnings to stderr would probably do
+ * more harm than good.
+ */
+ CHECK_SETOPT(actx, CURLOPT_NOSIGNAL, 1L, return false);
+
+ if (actx->debugging)
+ {
+ /*
+		 * Set a callback for retrieving error and debugging information
+		 * from libcurl. The callback only takes effect when CURLOPT_VERBOSE
+		 * has been set, so keep these two options in this order.
+ */
+ CHECK_SETOPT(actx, CURLOPT_DEBUGFUNCTION, debug_callback, return false);
+ CHECK_SETOPT(actx, CURLOPT_VERBOSE, 1L, return false);
+ }
+
+ CHECK_SETOPT(actx, CURLOPT_ERRORBUFFER, actx->curl_err, return false);
+
+ /*
+ * Only HTTPS is allowed. (Debug mode additionally allows HTTP; this is
+ * intended for testing only.)
+ *
+ * There's a bit of unfortunate complexity around the choice of
+ * CURLoption. CURLOPT_PROTOCOLS is deprecated in modern Curls, but its
+ * replacement didn't show up until relatively recently.
+ */
+ {
+#if CURL_AT_LEAST_VERSION(7, 85, 0)
+ const CURLoption popt = CURLOPT_PROTOCOLS_STR;
+ const char *protos = "https";
+ const char *const unsafe = "https,http";
+#else
+ const CURLoption popt = CURLOPT_PROTOCOLS;
+ long protos = CURLPROTO_HTTPS;
+ const long unsafe = CURLPROTO_HTTPS | CURLPROTO_HTTP;
+#endif
+
+ if (actx->debugging)
+ protos = unsafe;
+
+ CHECK_SETOPT(actx, popt, protos, return false);
+ }
+
+ /*
+ * If we're in debug mode, allow the developer to change the trusted CA
+ * list. For now, this is not something we expose outside of the UNSAFE
+ * mode, because it's not clear that it's useful in production: both libpq
+ * and the user's browser must trust the same authorization servers for
+ * the flow to work at all, so any changes to the roots are likely to be
+ * done system-wide.
+ */
+ if (actx->debugging)
+ {
+ const char *env;
+
+ if ((env = getenv("PGOAUTHCAFILE")) != NULL)
+ CHECK_SETOPT(actx, CURLOPT_CAINFO, env, return false);
+ }
+
+ /*
+ * Suppress the Accept header to make our request as minimal as possible.
+ * (Ideally we would set it to "application/json" instead, but OpenID is
+ * pretty strict when it comes to provider behavior, so we have to check
+ * what comes back anyway.)
+ */
+ actx->headers = curl_slist_append(actx->headers, "Accept:");
+ if (actx->headers == NULL)
+ {
+ actx_error(actx, "out of memory");
+ return false;
+ }
+ CHECK_SETOPT(actx, CURLOPT_HTTPHEADER, actx->headers, return false);
+
+ return true;
+}
+
+/*
+ * Generic HTTP Request Handlers
+ */
+
+/*
+ * Response callback from libcurl which appends the response body into
+ * actx->work_data (see start_request()). The maximum size of the data passed
+ * in a single call is defined by CURL_MAX_WRITE_SIZE, which is 16kB by
+ * default (and can only be changed by recompiling libcurl).
+ */
+static size_t
+append_data(char *buf, size_t size, size_t nmemb, void *userdata)
+{
+ struct async_ctx *actx = userdata;
+ PQExpBuffer resp = &actx->work_data;
+ size_t len = size * nmemb;
+
+ /* In case we receive data over the threshold, abort the transfer */
+ if ((resp->len + len) > MAX_OAUTH_RESPONSE_SIZE)
+ {
+ actx_error(actx, "response is too large");
+ return 0;
+ }
+
+ /* The data passed from libcurl is not null-terminated */
+ appendBinaryPQExpBuffer(resp, buf, len);
+
+ /*
+ * Signal an error in order to abort the transfer in case we ran out of
+ * memory in accepting the data.
+ */
+ if (PQExpBufferBroken(resp))
+ {
+ actx_error(actx, "out of memory");
+ return 0;
+ }
+
+ return len;
+}
+
+/*
+ * Begins an HTTP request on the multi handle. The caller should have set up all
+ * request-specific options on actx->curl first. The server's response body will
+ * be accumulated in actx->work_data (which will be reset, so don't store
+ * anything important there across this call).
+ *
+ * Once a request is queued, it can be driven to completion via drive_request().
+ * If actx->running is zero upon return, the request has already finished and
+ * drive_request() can be called without returning control to the client.
+ */
+static bool
+start_request(struct async_ctx *actx)
+{
+ CURLMcode err;
+
+ resetPQExpBuffer(&actx->work_data);
+ CHECK_SETOPT(actx, CURLOPT_WRITEFUNCTION, append_data, return false);
+ CHECK_SETOPT(actx, CURLOPT_WRITEDATA, actx, return false);
+
+ err = curl_multi_add_handle(actx->curlm, actx->curl);
+ if (err)
+ {
+ actx_error(actx, "failed to queue HTTP request: %s",
+ curl_multi_strerror(err));
+ return false;
+ }
+
+ /*
+ * actx->running tracks the number of running handles, so we can
+ * immediately call back if no waiting is needed.
+ *
+ * Even though this is nominally an asynchronous process, there are some
+ * operations that can synchronously fail by this point (e.g. connections
+ * to closed local ports) or even synchronously succeed if the stars align
+ * (all the libcurl connection caches hit and the server is fast).
+ */
+ err = curl_multi_socket_action(actx->curlm, CURL_SOCKET_TIMEOUT, 0, &actx->running);
+ if (err)
+ {
+ actx_error(actx, "asynchronous HTTP request failed: %s",
+ curl_multi_strerror(err));
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * CURL_IGNORE_DEPRECATION was added in 7.87.0. If it's not defined, we can make
+ * it a no-op.
+ */
+#ifndef CURL_IGNORE_DEPRECATION
+#define CURL_IGNORE_DEPRECATION(x) x
+#endif
+
+/*
+ * Drives the multi handle towards completion. The caller should have already
+ * set up an asynchronous request via start_request().
+ */
+static PostgresPollingStatusType
+drive_request(struct async_ctx *actx)
+{
+ CURLMcode err;
+ CURLMsg *msg;
+ int msgs_left;
+ bool done;
+
+ if (actx->running)
+ {
+ /*---
+ * There's an async request in progress. Pump the multi handle.
+ *
+ * curl_multi_socket_all() is officially deprecated, because it's
+ * inefficient and pointless if your event loop has already handed you
+ * the exact sockets that are ready. But that's not our use case --
+ * our client has no way to tell us which sockets are ready. (They
+ * don't even know there are sockets to begin with.)
+ *
+ * We can grab the list of triggered events from the multiplexer
+ * ourselves, but that's effectively what curl_multi_socket_all() is
+ * going to do. And there are currently no plans for the Curl project
+ * to remove or break this API, so ignore the deprecation. See
+ *
+ * https://curl.se/mail/lib-2024-11/0028.html
+ *
+ */
+ CURL_IGNORE_DEPRECATION(
+ err = curl_multi_socket_all(actx->curlm, &actx->running);
+ )
+
+ if (err)
+ {
+ actx_error(actx, "asynchronous HTTP request failed: %s",
+ curl_multi_strerror(err));
+ return PGRES_POLLING_FAILED;
+ }
+
+ if (actx->running)
+ {
+ /* We'll come back again. */
+ return PGRES_POLLING_READING;
+ }
+ }
+
+ done = false;
+ while ((msg = curl_multi_info_read(actx->curlm, &msgs_left)) != NULL)
+ {
+ if (msg->msg != CURLMSG_DONE)
+ {
+ /*
+ * Future libcurl versions may define new message types; we don't
+ * know how to handle them, so we'll ignore them.
+ */
+ continue;
+ }
+
+ /* First check the status of the request itself. */
+ if (msg->data.result != CURLE_OK)
+ {
+ /*
+ * If a more specific error hasn't already been reported, use
+ * libcurl's description.
+ */
+ if (actx->errbuf.len == 0)
+ actx_error_str(actx, curl_easy_strerror(msg->data.result));
+
+ return PGRES_POLLING_FAILED;
+ }
+
+ /* Now remove the finished handle; we'll add it back later if needed. */
+ err = curl_multi_remove_handle(actx->curlm, msg->easy_handle);
+ if (err)
+ {
+ actx_error(actx, "libcurl easy handle removal failed: %s",
+ curl_multi_strerror(err));
+ return PGRES_POLLING_FAILED;
+ }
+
+ done = true;
+ }
+
+ /* Sanity check. */
+ if (!done)
+ {
+ actx_error(actx, "no result was retrieved for the finished handle");
+ return PGRES_POLLING_FAILED;
+ }
+
+ return PGRES_POLLING_OK;
+}
+
+/*
+ * URL-Encoding Helpers
+ */
+
+/*
+ * Encodes a string using the application/x-www-form-urlencoded format, and
+ * appends it to the given buffer.
+ */
+static void
+append_urlencoded(PQExpBuffer buf, const char *s)
+{
+ char *escaped;
+ char *haystack;
+ char *match;
+
+ /* The first parameter to curl_easy_escape is deprecated by Curl */
+ escaped = curl_easy_escape(NULL, s, 0);
+ if (!escaped)
+ {
+ termPQExpBuffer(buf); /* mark the buffer broken */
+ return;
+ }
+
+ /*
+ * curl_easy_escape() almost does what we want, but we need the
+ * query-specific flavor which uses '+' instead of '%20' for spaces. The
+ * Curl command-line tool does this with a simple search-and-replace, so
+ * follow its lead.
+ */
+ haystack = escaped;
+
+ while ((match = strstr(haystack, "%20")) != NULL)
+ {
+ /* Append the unmatched portion, followed by the plus sign. */
+ appendBinaryPQExpBuffer(buf, haystack, match - haystack);
+ appendPQExpBufferChar(buf, '+');
+
+ /* Keep searching after the match. */
+ haystack = match + 3 /* strlen("%20") */ ;
+ }
+
+ /* Push the remainder of the string onto the buffer. */
+ appendPQExpBufferStr(buf, haystack);
+
+ curl_free(escaped);
+}
+
+/*
+ * Convenience wrapper for encoding a single string. Returns NULL on allocation
+ * failure.
+ */
+static char *
+urlencode(const char *s)
+{
+ PQExpBufferData buf;
+
+ initPQExpBuffer(&buf);
+ append_urlencoded(&buf, s);
+
+ return PQExpBufferDataBroken(buf) ? NULL : buf.data;
+}
+
+/*
+ * Appends a key/value pair to the end of an application/x-www-form-urlencoded
+ * list.
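+ *
+ * For example, appending the key "scope" with the value "openid email" to a
+ * nonempty buffer adds "&scope=openid+email".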
+ */
+static void
+build_urlencoded(PQExpBuffer buf, const char *key, const char *value)
+{
+ if (buf->len)
+ appendPQExpBufferChar(buf, '&');
+
+ append_urlencoded(buf, key);
+ appendPQExpBufferChar(buf, '=');
+ append_urlencoded(buf, value);
+}
+
+/*
+ * Specific HTTP Request Handlers
+ *
+ * This is finally the beginning of the actual application logic. Generally
+ * speaking, a single request consists of a start_* and a finish_* step, with
+ * drive_request() pumping the machine in between.
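+ *
+ * For example, discovery is performed by start_discovery(), followed by calls
+ * to drive_request() until the transfer completes, and finally
+ * finish_discovery().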
+ */
+
+/*
+ * Queue an OpenID Provider Configuration Request:
+ *
+ * https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationRequest
+ * https://www.rfc-editor.org/rfc/rfc8414#section-3.1
+ *
+ * This is done first to get the endpoint URIs we need to contact and to make
+ * sure the provider provides a device authorization flow. finish_discovery()
+ * will fill in actx->provider.
+ */
+static bool
+start_discovery(struct async_ctx *actx, const char *discovery_uri)
+{
+ CHECK_SETOPT(actx, CURLOPT_HTTPGET, 1L, return false);
+ CHECK_SETOPT(actx, CURLOPT_URL, discovery_uri, return false);
+
+ return start_request(actx);
+}
+
+static bool
+finish_discovery(struct async_ctx *actx)
+{
+ long response_code;
+
+ /*----
+ * Now check the response. OIDC Discovery 1.0 is pretty strict:
+ *
+ * A successful response MUST use the 200 OK HTTP status code and
+ * return a JSON object using the application/json content type that
+ * contains a set of Claims as its members that are a subset of the
+ * Metadata values defined in Section 3.
+ *
+ * Compared to standard HTTP semantics, this makes life easy -- we don't
+ * need to worry about redirections (which would call the Issuer host
+ * validation into question), or non-authoritative responses, or any other
+ * complications.
+ */
+ CHECK_GETINFO(actx, CURLINFO_RESPONSE_CODE, &response_code, return false);
+
+ if (response_code != 200)
+ {
+ actx_error(actx, "unexpected response code %ld", response_code);
+ return false;
+ }
+
+ /*
+ * Pull the fields we care about from the document.
+ */
+ actx->errctx = "failed to parse OpenID discovery document";
+ if (!parse_provider(actx, &actx->provider))
+ return false; /* error message already set */
+
+ /*
+ * Fill in any defaults for OPTIONAL/RECOMMENDED fields we care about.
+ */
+ if (!actx->provider.grant_types_supported)
+ {
+ /*
+ * Per Section 3, the default is ["authorization_code", "implicit"].
+ */
+ struct curl_slist *temp = actx->provider.grant_types_supported;
+
+ temp = curl_slist_append(temp, "authorization_code");
+ if (temp)
+ {
+ temp = curl_slist_append(temp, "implicit");
+ }
+
+ if (!temp)
+ {
+ actx_error(actx, "out of memory");
+ return false;
+ }
+
+ actx->provider.grant_types_supported = temp;
+ }
+
+ return true;
+}
+
+/*
+ * Ensure that the discovery document is provided by the expected issuer.
+ * Currently, issuers are statically configured in the connection string.
+ */
+static bool
+check_issuer(struct async_ctx *actx, PGconn *conn)
+{
+ const struct provider *provider = &actx->provider;
+
+ Assert(conn->oauth_issuer_id); /* ensured by setup_oauth_parameters() */
+ Assert(provider->issuer); /* ensured by parse_provider() */
+
+ /*---
+ * We require strict equality for issuer identifiers -- no path or case
+ * normalization, no substitution of default ports and schemes, etc. This
+ * is done to match the rules in OIDC Discovery Sec. 4.3 for config
+ * validation:
+ *
+ * The issuer value returned MUST be identical to the Issuer URL that
+ * was used as the prefix to /.well-known/openid-configuration to
+ * retrieve the configuration information.
+ *
+ * as well as the rules set out in RFC 9207 for avoiding mix-up attacks:
+ *
+ * Clients MUST then [...] compare the result to the issuer identifier
+ * of the authorization server where the authorization request was
+ * sent to. This comparison MUST use simple string comparison as defined
+ * in Section 6.2.1 of [RFC3986].
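+	 *
+	 * As a consequence, e.g. "https://example.com" and "https://example.com/"
+	 * are considered different issuers.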
+ */
+ if (strcmp(conn->oauth_issuer_id, provider->issuer) != 0)
+ {
+ actx_error(actx,
+ "the issuer identifier (%s) does not match oauth_issuer (%s)",
+ provider->issuer, conn->oauth_issuer_id);
+ return false;
+ }
+
+ return true;
+}
+
+#define HTTPS_SCHEME "https://"
+#define OAUTH_GRANT_TYPE_DEVICE_CODE "urn:ietf:params:oauth:grant-type:device_code"
+
+/*
+ * Ensure that the provider supports the Device Authorization flow (i.e. it
+ * provides a device authorization endpoint, and both the token and device
+ * authorization endpoint URLs seem reasonable).
+ */
+static bool
+check_for_device_flow(struct async_ctx *actx)
+{
+ const struct provider *provider = &actx->provider;
+
+ Assert(provider->issuer); /* ensured by parse_provider() */
+ Assert(provider->token_endpoint); /* ensured by parse_provider() */
+
+ if (!provider->device_authorization_endpoint)
+ {
+ actx_error(actx,
+ "issuer \"%s\" does not provide a device authorization endpoint",
+ provider->issuer);
+ return false;
+ }
+
+ /*
+ * The original implementation checked that OAUTH_GRANT_TYPE_DEVICE_CODE
+ * was present in the discovery document's grant_types_supported list. MS
+ * Entra does not advertise this grant type, though, and since it doesn't
+ * make sense to stand up a device_authorization_endpoint without also
+ * accepting device codes at the token_endpoint, that's the only thing we
+ * currently require.
+ */
+
+ /*
+ * Although libcurl will fail later if the URL contains an unsupported
+ * scheme, that error message is going to be a bit opaque. This is a
+ * decent time to bail out if we're not using HTTPS for the endpoints
+ * we'll use for the flow.
+ */
+ if (!actx->debugging)
+ {
+ if (pg_strncasecmp(provider->device_authorization_endpoint,
+ HTTPS_SCHEME, strlen(HTTPS_SCHEME)) != 0)
+ {
+ actx_error(actx,
+ "device authorization endpoint \"%s\" must use HTTPS",
+ provider->device_authorization_endpoint);
+ return false;
+ }
+
+ if (pg_strncasecmp(provider->token_endpoint,
+ HTTPS_SCHEME, strlen(HTTPS_SCHEME)) != 0)
+ {
+ actx_error(actx,
+ "token endpoint \"%s\" must use HTTPS",
+ provider->token_endpoint);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/*
+ * Adds the client ID (and secret, if provided) to the current request, using
+ * either HTTP headers or the request body.
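+ *
+ * For example, when oauth_client_secret is set, the request carries an HTTP
+ * Basic Authorization header; otherwise, "client_id=..." is appended to the
+ * urlencoded request body instead.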
+ */
+static bool
+add_client_identification(struct async_ctx *actx, PQExpBuffer reqbody, PGconn *conn)
+{
+ bool success = false;
+ char *username = NULL;
+ char *password = NULL;
+
+ if (conn->oauth_client_secret) /* Zero-length secrets are permitted! */
+ {
+ /*----
+ * Use HTTP Basic auth to send the client_id and secret. Per RFC 6749,
+ * Sec. 2.3.1,
+ *
+ * Including the client credentials in the request-body using the
+ * two parameters is NOT RECOMMENDED and SHOULD be limited to
+ * clients unable to directly utilize the HTTP Basic authentication
+ * scheme (or other password-based HTTP authentication schemes).
+ *
+ * Additionally:
+ *
+ * The client identifier is encoded using the
+ * "application/x-www-form-urlencoded" encoding algorithm per Appendix
+ * B, and the encoded value is used as the username; the client
+ * password is encoded using the same algorithm and used as the
+ * password.
+ *
+ * (Appendix B modifies application/x-www-form-urlencoded by requiring
+ * an initial UTF-8 encoding step. Since the client ID and secret must
+ * both be 7-bit ASCII -- RFC 6749 Appendix A -- we don't worry about
+ * that in this function.)
+ *
+ * client_id is not added to the request body in this case. Not only
+ * would it be redundant, but some providers in the wild (e.g. Okta)
+ * refuse to accept it.
+ */
+ username = urlencode(conn->oauth_client_id);
+ password = urlencode(conn->oauth_client_secret);
+
+ if (!username || !password)
+ {
+ actx_error(actx, "out of memory");
+ goto cleanup;
+ }
+
+ CHECK_SETOPT(actx, CURLOPT_HTTPAUTH, CURLAUTH_BASIC, goto cleanup);
+ CHECK_SETOPT(actx, CURLOPT_USERNAME, username, goto cleanup);
+ CHECK_SETOPT(actx, CURLOPT_PASSWORD, password, goto cleanup);
+
+ actx->used_basic_auth = true;
+ }
+ else
+ {
+ /*
+ * If we're not otherwise authenticating, client_id is REQUIRED in the
+ * request body.
+ */
+ build_urlencoded(reqbody, "client_id", conn->oauth_client_id);
+
+ CHECK_SETOPT(actx, CURLOPT_HTTPAUTH, CURLAUTH_NONE, goto cleanup);
+ actx->used_basic_auth = false;
+ }
+
+ success = true;
+
+cleanup:
+ free(username);
+ free(password);
+
+ return success;
+}
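+
+/*
+ * For illustration only (hypothetical values, not taken from this patch): with
+ * oauth_client_id "myapp" and oauth_client_secret "s3cret", libcurl sends an
+ * HTTP Basic header built from the URL-encoded credentials,
+ *
+ *     Authorization: Basic base64("myapp:s3cret")
+ *
+ * and client_id is omitted from the body. With no secret configured, the body
+ * instead carries
+ *
+ *     client_id=myapp
+ *
+ * alongside whatever other form parameters the caller has already built.
+ */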
+
+/*
+ * Queue a Device Authorization Request:
+ *
+ * https://www.rfc-editor.org/rfc/rfc8628#section-3.1
+ *
+ * This is the second step. We ask the provider to verify the end user out of
+ * band and authorize us to act on their behalf; it will give us the required
+ * nonces for us to later poll the request status, which we'll grab in
+ * finish_device_authz().
+ */
+static bool
+start_device_authz(struct async_ctx *actx, PGconn *conn)
+{
+ const char *device_authz_uri = actx->provider.device_authorization_endpoint;
+ PQExpBuffer work_buffer = &actx->work_data;
+
+ Assert(conn->oauth_client_id); /* ensured by setup_oauth_parameters() */
+ Assert(device_authz_uri); /* ensured by check_for_device_flow() */
+
+ /* Construct our request body. */
+ resetPQExpBuffer(work_buffer);
+ if (conn->oauth_scope && conn->oauth_scope[0])
+ build_urlencoded(work_buffer, "scope", conn->oauth_scope);
+
+ if (!add_client_identification(actx, work_buffer, conn))
+ return false;
+
+ if (PQExpBufferBroken(work_buffer))
+ {
+ actx_error(actx, "out of memory");
+ return false;
+ }
+
+ /* Make our request. */
+ CHECK_SETOPT(actx, CURLOPT_URL, device_authz_uri, return false);
+ CHECK_SETOPT(actx, CURLOPT_COPYPOSTFIELDS, work_buffer->data, return false);
+
+ return start_request(actx);
+}
+
+static bool
+finish_device_authz(struct async_ctx *actx)
+{
+ long response_code;
+
+ CHECK_GETINFO(actx, CURLINFO_RESPONSE_CODE, &response_code, return false);
+
+ /*
+ * Per RFC 8628, Section 3.2, a successful device authorization response
+ * uses 200 OK.
+ */
+ if (response_code == 200)
+ {
+ actx->errctx = "failed to parse device authorization";
+ if (!parse_device_authz(actx, &actx->authz))
+ return false; /* error message already set */
+
+ return true;
+ }
+
+ /*
+ * The device authorization endpoint uses the same error response as the
+ * token endpoint, so the error handling roughly follows
+ * finish_token_request(). The key difference is that an error here is
+ * immediately fatal.
+ */
+ if (response_code == 400 || response_code == 401)
+ {
+ struct token_error err = {0};
+
+ if (!parse_token_error(actx, &err))
+ {
+ free_token_error(&err);
+ return false;
+ }
+
+ /* Copy the token error into the context error buffer */
+ record_token_error(actx, &err);
+
+ free_token_error(&err);
+ return false;
+ }
+
+ /* Any other response codes are considered invalid */
+ actx_error(actx, "unexpected response code %ld", response_code);
+ return false;
+}
+
+/*
+ * Queue a Device Access Token Request:
+ *
+ * https://www.rfc-editor.org/rfc/rfc8628#section-3.4
+ *
+ * This is the final step. We continually poll the token endpoint to see if the
+ * user has authorized us yet. finish_token_request() will pull either the token
+ * or an (ideally temporary) error status from the provider.
+ */
+static bool
+start_token_request(struct async_ctx *actx, PGconn *conn)
+{
+ const char *token_uri = actx->provider.token_endpoint;
+ const char *device_code = actx->authz.device_code;
+ PQExpBuffer work_buffer = &actx->work_data;
+
+ Assert(conn->oauth_client_id); /* ensured by setup_oauth_parameters() */
+ Assert(token_uri); /* ensured by parse_provider() */
+ Assert(device_code); /* ensured by parse_device_authz() */
+
+ /* Construct our request body. */
+ resetPQExpBuffer(work_buffer);
+ build_urlencoded(work_buffer, "device_code", device_code);
+ build_urlencoded(work_buffer, "grant_type", OAUTH_GRANT_TYPE_DEVICE_CODE);
+
+ if (!add_client_identification(actx, work_buffer, conn))
+ return false;
+
+ if (PQExpBufferBroken(work_buffer))
+ {
+ actx_error(actx, "out of memory");
+ return false;
+ }
+
+ /* Make our request. */
+ CHECK_SETOPT(actx, CURLOPT_URL, token_uri, return false);
+ CHECK_SETOPT(actx, CURLOPT_COPYPOSTFIELDS, work_buffer->data, return false);
+
+ return start_request(actx);
+}
+
+static bool
+finish_token_request(struct async_ctx *actx, struct token *tok)
+{
+ long response_code;
+
+ CHECK_GETINFO(actx, CURLINFO_RESPONSE_CODE, &response_code, return false);
+
+ /*
+ * Per RFC 6749, Section 5.1, a successful response uses 200 OK.
+ */
+ if (response_code == 200)
+ {
+ actx->errctx = "failed to parse access token response";
+ if (!parse_access_token(actx, tok))
+ return false; /* error message already set */
+
+ return true;
+ }
+
+ /*
+ * An error response uses either 400 Bad Request or 401 Unauthorized.
+ * There are references online to implementations using 403 for error
+ * returns, which would violate the specification. For now we stick to
+ * the specification, but we might have to revisit this.
+ */
+ if (response_code == 400 || response_code == 401)
+ {
+ if (!parse_token_error(actx, &tok->err))
+ return false;
+
+ return true;
+ }
+
+ /* Any other response codes are considered invalid */
+ actx_error(actx, "unexpected response code %ld", response_code);
+ return false;
+}
+
+/*
+ * Finishes the token request and examines the response. If the flow has
+ * completed, a valid token will be returned via the parameter list. Otherwise,
+ * the token parameter remains unchanged, and the caller needs to wait for
+ * another interval (which will have been increased in response to a slow_down
+ * message from the server) before starting a new token request.
+ *
+ * False is returned only for permanent error conditions.
+ */
+static bool
+handle_token_response(struct async_ctx *actx, char **token)
+{
+ bool success = false;
+ struct token tok = {0};
+ const struct token_error *err;
+
+ if (!finish_token_request(actx, &tok))
+ goto token_cleanup;
+
+ /* A successful token request gives either a token or an in-band error. */
+ Assert(tok.access_token || tok.err.error);
+
+ if (tok.access_token)
+ {
+ *token = tok.access_token;
+ tok.access_token = NULL;
+
+ success = true;
+ goto token_cleanup;
+ }
+
+ /*
+ * authorization_pending and slow_down are the only acceptable errors;
+ * anything else and we bail. These are defined in RFC 8628, Sec. 3.5.
+ */
+ err = &tok.err;
+ if (strcmp(err->error, "authorization_pending") != 0 &&
+ strcmp(err->error, "slow_down") != 0)
+ {
+ record_token_error(actx, err);
+ goto token_cleanup;
+ }
+
+ /*
+ * A slow_down error requires us to permanently increase our retry
+ * interval by five seconds.
+ */
+ if (strcmp(err->error, "slow_down") == 0)
+ {
+ int prev_interval = actx->authz.interval;
+
+ actx->authz.interval += 5;
+ if (actx->authz.interval < prev_interval)
+ {
+ actx_error(actx, "slow_down interval overflow");
+ goto token_cleanup;
+ }
+ }
+
+ success = true;
+
+token_cleanup:
+ free_token(&tok);
+ return success;
+}
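+
+/*
+ * For illustration (a hypothetical provider response, not produced by this
+ * code): while the end user has not yet approved the request, the token
+ * endpoint keeps answering with an RFC 8628 Sec. 3.5 error such as
+ *
+ *     HTTP/1.1 400 Bad Request
+ *     {"error": "authorization_pending"}
+ *
+ * or, if we are polling too quickly,
+ *
+ *     {"error": "slow_down"}
+ *
+ * both of which handle_token_response() treats as "try again later", bumping
+ * the retry interval by five seconds in the slow_down case.
+ */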
+
+/*
+ * Displays a device authorization prompt for action by the end user, either via
+ * the PQauthDataHook, or by a message on standard error if the hook does not
+ * handle the prompt.
+ */
+static bool
+prompt_user(struct async_ctx *actx, PGconn *conn)
+{
+ int res;
+ PGpromptOAuthDevice prompt = {
+ .verification_uri = actx->authz.verification_uri,
+ .user_code = actx->authz.user_code,
+ .verification_uri_complete = actx->authz.verification_uri_complete,
+ .expires_in = actx->authz.expires_in,
+ };
+
+ res = PQauthDataHook(PQAUTHDATA_PROMPT_OAUTH_DEVICE, conn, &prompt);
+
+ if (!res)
+ {
+ /*
+ * translator: The first %s is a URL for the user to visit in a
+ * browser, and the second %s is a code to be copy-pasted there.
+ */
+ fprintf(stderr, libpq_gettext("Visit %s and enter the code: %s\n"),
+ prompt.verification_uri, prompt.user_code);
+ }
+ else if (res < 0)
+ {
+ actx_error(actx, "device prompt failed");
+ return false;
+ }
+
+ return true;
+}
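+
+/*
+ * For illustration, an application can take over this prompt by registering
+ * its own hook during startup; "my_auth_hook" and "show_qr_code" below are
+ * hypothetical application code, not part of libpq:
+ *
+ *     static int
+ *     my_auth_hook(PGauthData type, PGconn *conn, void *data)
+ *     {
+ *         if (type == PQAUTHDATA_PROMPT_OAUTH_DEVICE)
+ *         {
+ *             const PGpromptOAuthDevice *prompt = data;
+ *             const char *uri = prompt->verification_uri_complete ?
+ *                 prompt->verification_uri_complete : prompt->verification_uri;
+ *
+ *             show_qr_code(uri, prompt->user_code);
+ *             return 1;
+ *         }
+ *         return 0;
+ *     }
+ *
+ *     PQsetAuthDataHook(my_auth_hook);
+ *
+ * A positive return value tells libpq the prompt was handled (suppressing the
+ * stderr fallback above); zero falls through to the default behavior.
+ */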
+
+/*
+ * Calls curl_global_init() in a thread-safe way.
+ *
+ * libcurl has stringent requirements for the thread context in which you call
+ * curl_global_init(), because it's going to try initializing a bunch of other
+ * libraries (OpenSSL, Winsock, etc.). Recent versions of libcurl have improved
+ * the thread-safety situation, but there's a chicken-and-egg problem at
+ * runtime: you can't check the thread safety until you've initialized libcurl,
+ * which you can't do from within a thread unless you know it's thread-safe...
+ *
+ * Returns true if initialization was successful. Successful or not, this
+ * function will not try to reinitialize Curl on successive calls.
+ */
+static bool
+initialize_curl(PGconn *conn)
+{
+ /*
+ * Don't let the compiler play tricks with this variable. In the
+ * HAVE_THREADSAFE_CURL_GLOBAL_INIT case, we don't care if two threads
+ * enter simultaneously, but we do care if this gets set transiently to
+ * PG_BOOL_YES/NO in cases where that's not the final answer.
+ */
+ static volatile PGTernaryBool init_successful = PG_BOOL_UNKNOWN;
+#if HAVE_THREADSAFE_CURL_GLOBAL_INIT
+ curl_version_info_data *info;
+#endif
+
+#if !HAVE_THREADSAFE_CURL_GLOBAL_INIT
+
+ /*
+ * Lock around the whole function. If a libpq client performs its own work
+ * with libcurl, it must either ensure that Curl is initialized safely
+ * before calling us (in which case our call will be a no-op), or else it
+ * must guard its own calls to curl_global_init() with a registered
+ * threadlock handler. See PQregisterThreadLock().
+ */
+ pglock_thread();
+#endif
+
+ /*
+ * Skip initialization if we've already done it. (Curl tracks the number
+ * of calls; there's no point in incrementing the counter every time we
+ * connect.)
+ */
+ if (init_successful == PG_BOOL_YES)
+ goto done;
+ else if (init_successful == PG_BOOL_NO)
+ {
+ libpq_append_conn_error(conn,
+ "curl_global_init previously failed during OAuth setup");
+ goto done;
+ }
+
+ /*
+ * We know we've already initialized Winsock by this point (see
+ * pqMakeEmptyPGconn()), so we should be able to safely skip that bit. But
+ * we have to tell libcurl to initialize everything else, because other
+ * pieces of our client executable may already be using libcurl for their
+ * own purposes. If we initialize libcurl with only a subset of its
+ * features, we could break those other clients nondeterministically, and
+ * that would probably be a nightmare to debug.
+ *
+ * If some other part of the program has already called this, it's a
+ * no-op.
+ */
+ if (curl_global_init(CURL_GLOBAL_ALL & ~CURL_GLOBAL_WIN32) != CURLE_OK)
+ {
+ libpq_append_conn_error(conn,
+ "curl_global_init failed during OAuth setup");
+ init_successful = PG_BOOL_NO;
+ goto done;
+ }
+
+#if HAVE_THREADSAFE_CURL_GLOBAL_INIT
+
+ /*
+ * If we determined at configure time that the Curl installation is
+ * thread-safe, our job here is much easier. We simply initialize above
+ * without any locking (concurrent or duplicated calls are fine in that
+ * situation), then double-check to make sure the runtime setting agrees,
+ * to try to catch silent downgrades.
+ */
+ info = curl_version_info(CURLVERSION_NOW);
+ if (!(info->features & CURL_VERSION_THREADSAFE))
+ {
+ /*
+ * In a downgrade situation, the damage is already done. Curl global
+ * state may be corrupted. Be noisy.
+ */
+ libpq_append_conn_error(conn, "libcurl is no longer thread-safe\n"
+ "\tCurl initialization was reported thread-safe when libpq\n"
+ "\twas compiled, but the currently installed version of\n"
+ "\tlibcurl reports that it is not. Recompile libpq against\n"
+ "\tthe installed version of libcurl.");
+ init_successful = PG_BOOL_NO;
+ goto done;
+ }
+#endif
+
+ init_successful = PG_BOOL_YES;
+
+done:
+#if !HAVE_THREADSAFE_CURL_GLOBAL_INIT
+ pgunlock_thread();
+#endif
+ return (init_successful == PG_BOOL_YES);
+}
+
+/*
+ * The core nonblocking libcurl implementation. This will be called several
+ * times to pump the async engine.
+ *
+ * The architecture is based on PQconnectPoll(). The first half drives the
+ * connection state forward as necessary, returning if we're not ready to
+ * proceed to the next step yet. The second half performs the actual transition
+ * between states.
+ *
+ * You can trace the overall OAuth flow through the second half. It's linear
+ * until we get to the end, where we flip back and forth between
+ * OAUTH_STEP_TOKEN_REQUEST and OAUTH_STEP_WAIT_INTERVAL to regularly ping the
+ * provider.
+ */
+static PostgresPollingStatusType
+pg_fe_run_oauth_flow_impl(PGconn *conn)
+{
+ fe_oauth_state *state = conn->sasl_state;
+ struct async_ctx *actx;
+
+ if (!initialize_curl(conn))
+ return PGRES_POLLING_FAILED;
+
+ if (!state->async_ctx)
+ {
+ /*
+ * Create our asynchronous state, and hook it into the upper-level
+ * OAuth state immediately, so any failures below won't leak the
+ * context allocation.
+ */
+ actx = calloc(1, sizeof(*actx));
+ if (!actx)
+ {
+ libpq_append_conn_error(conn, "out of memory");
+ return PGRES_POLLING_FAILED;
+ }
+
+ actx->mux = PGINVALID_SOCKET;
+ actx->timerfd = -1;
+
+ /* Should we enable unsafe features? */
+ actx->debugging = oauth_unsafe_debugging_enabled();
+
+ state->async_ctx = actx;
+
+ initPQExpBuffer(&actx->work_data);
+ initPQExpBuffer(&actx->errbuf);
+
+ if (!setup_multiplexer(actx))
+ goto error_return;
+
+ if (!setup_curl_handles(actx))
+ goto error_return;
+ }
+
+ actx = state->async_ctx;
+
+ do
+ {
+ /* By default, the multiplexer is the altsock. Reassign as desired. */
+ conn->altsock = actx->mux;
+
+ switch (actx->step)
+ {
+ case OAUTH_STEP_INIT:
+ break;
+
+ case OAUTH_STEP_DISCOVERY:
+ case OAUTH_STEP_DEVICE_AUTHORIZATION:
+ case OAUTH_STEP_TOKEN_REQUEST:
+ {
+ PostgresPollingStatusType status;
+
+ status = drive_request(actx);
+
+ if (status == PGRES_POLLING_FAILED)
+ goto error_return;
+ else if (status != PGRES_POLLING_OK)
+ {
+ /* not done yet */
+ return status;
+ }
+
+ break;
+ }
+
+ case OAUTH_STEP_WAIT_INTERVAL:
+
+ /*
+ * The client application is supposed to wait until our timer
+ * expires before calling PQconnectPoll() again, but that
+ * might not happen. To avoid sending a token request early,
+ * check the timer before continuing.
+ */
+ if (!timer_expired(actx))
+ {
+ conn->altsock = actx->timerfd;
+ return PGRES_POLLING_READING;
+ }
+
+ /* Disable the expired timer. */
+ if (!set_timer(actx, -1))
+ goto error_return;
+
+ break;
+ }
+
+ /*
+ * Each case here must ensure that actx->running is set while we're
+ * waiting on some asynchronous work. Most cases rely on
+ * start_request() to do that for them.
+ */
+ switch (actx->step)
+ {
+ case OAUTH_STEP_INIT:
+ actx->errctx = "failed to fetch OpenID discovery document";
+ if (!start_discovery(actx, conn->oauth_discovery_uri))
+ goto error_return;
+
+ actx->step = OAUTH_STEP_DISCOVERY;
+ break;
+
+ case OAUTH_STEP_DISCOVERY:
+ if (!finish_discovery(actx))
+ goto error_return;
+
+ if (!check_issuer(actx, conn))
+ goto error_return;
+
+ actx->errctx = "cannot run OAuth device authorization";
+ if (!check_for_device_flow(actx))
+ goto error_return;
+
+ actx->errctx = "failed to obtain device authorization";
+ if (!start_device_authz(actx, conn))
+ goto error_return;
+
+ actx->step = OAUTH_STEP_DEVICE_AUTHORIZATION;
+ break;
+
+ case OAUTH_STEP_DEVICE_AUTHORIZATION:
+ if (!finish_device_authz(actx))
+ goto error_return;
+
+ actx->errctx = "failed to obtain access token";
+ if (!start_token_request(actx, conn))
+ goto error_return;
+
+ actx->step = OAUTH_STEP_TOKEN_REQUEST;
+ break;
+
+ case OAUTH_STEP_TOKEN_REQUEST:
+ if (!handle_token_response(actx, &conn->oauth_token))
+ goto error_return;
+
+ if (!actx->user_prompted)
+ {
+ /*
+ * Now that we know the token endpoint isn't broken, give
+ * the user the login instructions.
+ */
+ if (!prompt_user(actx, conn))
+ goto error_return;
+
+ actx->user_prompted = true;
+ }
+
+ if (conn->oauth_token)
+ break; /* done! */
+
+ /*
+ * Wait for the required interval before issuing the next
+ * request.
+ */
+ if (!set_timer(actx, actx->authz.interval * 1000))
+ goto error_return;
+
+ /*
+ * No Curl requests are running, so we can simplify by having
+ * the client wait directly on the timerfd rather than the
+ * multiplexer.
+ */
+ conn->altsock = actx->timerfd;
+
+ actx->step = OAUTH_STEP_WAIT_INTERVAL;
+ actx->running = 1;
+ break;
+
+ case OAUTH_STEP_WAIT_INTERVAL:
+ actx->errctx = "failed to obtain access token";
+ if (!start_token_request(actx, conn))
+ goto error_return;
+
+ actx->step = OAUTH_STEP_TOKEN_REQUEST;
+ break;
+ }
+
+ /*
+ * The vast majority of the time, if we don't have a token at this
+ * point, actx->running will be set. But there are some corner cases
+ * where we can immediately loop back around; see start_request().
+ */
+ } while (!conn->oauth_token && !actx->running);
+
+ /* If we've stored a token, we're done. Otherwise come back later. */
+ return conn->oauth_token ? PGRES_POLLING_OK : PGRES_POLLING_READING;
+
+error_return:
+
+ /*
+ * Assemble the three parts of our error: context, body, and detail. See
+ * also the documentation for struct async_ctx.
+ */
+ if (actx->errctx)
+ {
+ appendPQExpBufferStr(&conn->errorMessage,
+ libpq_gettext(actx->errctx));
+ appendPQExpBufferStr(&conn->errorMessage, ": ");
+ }
+
+ if (PQExpBufferDataBroken(actx->errbuf))
+ appendPQExpBufferStr(&conn->errorMessage,
+ libpq_gettext("out of memory"));
+ else
+ appendPQExpBufferStr(&conn->errorMessage, actx->errbuf.data);
+
+ if (actx->curl_err[0])
+ {
+ size_t len;
+
+ appendPQExpBuffer(&conn->errorMessage,
+ " (libcurl: %s)", actx->curl_err);
+
+ /* Sometimes libcurl adds a newline to the error buffer. :( */
+ len = conn->errorMessage.len;
+ if (len >= 2 && conn->errorMessage.data[len - 2] == '\n')
+ {
+ conn->errorMessage.data[len - 2] = ')';
+ conn->errorMessage.data[len - 1] = '\0';
+ conn->errorMessage.len--;
+ }
+ }
+
+ appendPQExpBufferStr(&conn->errorMessage, "\n");
+
+ return PGRES_POLLING_FAILED;
+}
+
+/*
+ * The top-level entry point. This is a convenient place to put necessary
+ * wrapper logic before handing off to the true implementation, above.
+ */
+PostgresPollingStatusType
+pg_fe_run_oauth_flow(PGconn *conn)
+{
+ PostgresPollingStatusType result;
+#ifndef WIN32
+ sigset_t osigset;
+ bool sigpipe_pending;
+ bool masked;
+
+ /*---
+ * Ignore SIGPIPE on this thread during all Curl processing.
+ *
+ * Because we support multiple threads, we have to set up libcurl with
+ * CURLOPT_NOSIGNAL, which disables its default global handling of
+ * SIGPIPE. From the Curl docs:
+ *
+ * libcurl makes an effort to never cause such SIGPIPE signals to
+ * trigger, but some operating systems have no way to avoid them and
+ * even on those that have there are some corner cases when they may
+ * still happen, contrary to our desire.
+ *
+ * Note that libcurl is also at the mercy of its DNS resolution and SSL
+ * libraries; if any of them forgets a MSG_NOSIGNAL then we're in trouble.
+ * Modern platforms and libraries seem to get it right, so this is a
+ * difficult corner case to exercise in practice, and unfortunately it's
+ * not really clear whether it's necessary in all cases.
+ */
+ masked = (pq_block_sigpipe(&osigset, &sigpipe_pending) == 0);
+#endif
+
+ result = pg_fe_run_oauth_flow_impl(conn);
+
+#ifndef WIN32
+ if (masked)
+ {
+ /*
+ * Undo the SIGPIPE mask. Assume we may have gotten EPIPE (we have no
+ * way of knowing at this level).
+ */
+ pq_reset_sigpipe(&osigset, sigpipe_pending, true /* EPIPE, maybe */ );
+ }
+#endif
+
+ return result;
+}
--- /dev/null
+/*-------------------------------------------------------------------------
+ *
+ * fe-auth-oauth.c
+ * The front-end (client) implementation of OAuth/OIDC authentication
+ * using the SASL OAUTHBEARER mechanism.
+ *
+ * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ * src/interfaces/libpq/fe-auth-oauth.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres_fe.h"
+
+#include "common/base64.h"
+#include "common/hmac.h"
+#include "common/jsonapi.h"
+#include "common/oauth-common.h"
+#include "fe-auth.h"
+#include "fe-auth-oauth.h"
+#include "mb/pg_wchar.h"
+
+/* The exported OAuth callback mechanism. */
+static void *oauth_init(PGconn *conn, const char *password,
+ const char *sasl_mechanism);
+static SASLStatus oauth_exchange(void *opaq, bool final,
+ char *input, int inputlen,
+ char **output, int *outputlen);
+static bool oauth_channel_bound(void *opaq);
+static void oauth_free(void *opaq);
+
+const pg_fe_sasl_mech pg_oauth_mech = {
+ oauth_init,
+ oauth_exchange,
+ oauth_channel_bound,
+ oauth_free,
+};
+
+/*
+ * Initializes mechanism state for OAUTHBEARER.
+ *
+ * For a full description of the API, see libpq/fe-auth-sasl.h.
+ */
+static void *
+oauth_init(PGconn *conn, const char *password,
+ const char *sasl_mechanism)
+{
+ fe_oauth_state *state;
+
+ /*
+ * We only support one SASL mechanism here; anything else is programmer
+ * error.
+ */
+ Assert(sasl_mechanism != NULL);
+ Assert(strcmp(sasl_mechanism, OAUTHBEARER_NAME) == 0);
+
+ state = calloc(1, sizeof(*state));
+ if (!state)
+ return NULL;
+
+ state->step = FE_OAUTH_INIT;
+ state->conn = conn;
+
+ return state;
+}
+
+/*
+ * Frees the state allocated by oauth_init().
+ *
+ * This handles only mechanism state tied to the connection lifetime; state
+ * stored in state->async_ctx is freed up either immediately after the
+ * authentication handshake succeeds, or before the mechanism is cleaned up on
+ * failure. See pg_fe_cleanup_oauth_flow() and cleanup_user_oauth_flow().
+ */
+static void
+oauth_free(void *opaq)
+{
+ fe_oauth_state *state = opaq;
+
+ /* Any async authentication state should have been cleaned up already. */
+ Assert(!state->async_ctx);
+
+ free(state);
+}
+
+#define kvsep "\x01"
+
+/*
+ * Constructs an OAUTHBEARER client initial response (RFC 7628, Sec. 3.1).
+ *
+ * If discover is true, the initial response will contain a request for the
+ * server's required OAuth parameters (Sec. 4.3). Otherwise, conn->oauth_token
+ * must be set; it will be sent as the connection's bearer token.
+ *
+ * Returns the response as a null-terminated string, or NULL on error.
+ */
+static char *
+client_initial_response(PGconn *conn, bool discover)
+{
+ static const char *const resp_format = "n,," kvsep "auth=%s%s" kvsep kvsep;
+
+ PQExpBufferData buf;
+ const char *authn_scheme;
+ char *response = NULL;
+ const char *token = conn->oauth_token;
+
+ if (discover)
+ {
+ /* Parameter discovery uses a completely empty auth value. */
+ authn_scheme = token = "";
+ }
+ else
+ {
+ /*
+ * Use a Bearer authentication scheme (RFC 6750, Sec. 2.1). A trailing
+ * space is used as a separator.
+ */
+ authn_scheme = "Bearer ";
+
+ /* conn->oauth_token must have been set in this case. */
+ if (!token)
+ {
+ Assert(false);
+ libpq_append_conn_error(conn,
+ "internal error: no OAuth token was set for the connection");
+ return NULL;
+ }
+ }
+
+ initPQExpBuffer(&buf);
+ appendPQExpBuffer(&buf, resp_format, authn_scheme, token);
+
+ if (!PQExpBufferDataBroken(buf))
+ response = strdup(buf.data);
+ termPQExpBuffer(&buf);
+
+ if (!response)
+ libpq_append_conn_error(conn, "out of memory");
+
+ return response;
+}
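+
+/*
+ * For example (illustrative only), with a token of "abcd" the initial
+ * response built above is
+ *
+ *     n,,\x01auth=Bearer abcd\x01\x01
+ *
+ * and in discovery mode (no token) it degenerates to
+ *
+ *     n,,\x01auth=\x01\x01
+ *
+ * where \x01 is the kvsep control byte required by RFC 7628.
+ */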
+
+/*
+ * JSON Parser (for the OAUTHBEARER error result)
+ */
+
+/* Relevant JSON fields in the error result object. */
+#define ERROR_STATUS_FIELD "status"
+#define ERROR_SCOPE_FIELD "scope"
+#define ERROR_OPENID_CONFIGURATION_FIELD "openid-configuration"
+
+struct json_ctx
+{
+ char *errmsg; /* any non-NULL value stops all processing */
+ PQExpBufferData errbuf; /* backing memory for errmsg */
+ int nested; /* nesting level (zero is the top) */
+
+ const char *target_field_name; /* points to a static allocation */
+ char **target_field; /* see below */
+
+ /* target_field, if set, points to one of the following: */
+ char *status;
+ char *scope;
+ char *discovery_uri;
+};
+
+#define oauth_json_has_error(ctx) \
+ (PQExpBufferDataBroken((ctx)->errbuf) || (ctx)->errmsg)
+
+#define oauth_json_set_error(ctx, ...) \
+ do { \
+ appendPQExpBuffer(&(ctx)->errbuf, __VA_ARGS__); \
+ (ctx)->errmsg = (ctx)->errbuf.data; \
+ } while (0)
+
+static JsonParseErrorType
+oauth_json_object_start(void *state)
+{
+ struct json_ctx *ctx = state;
+
+ if (ctx->target_field)
+ {
+ Assert(ctx->nested == 1);
+
+ oauth_json_set_error(ctx,
+ libpq_gettext("field \"%s\" must be a string"),
+ ctx->target_field_name);
+ }
+
+ ++ctx->nested;
+ return oauth_json_has_error(ctx) ? JSON_SEM_ACTION_FAILED : JSON_SUCCESS;
+}
+
+static JsonParseErrorType
+oauth_json_object_end(void *state)
+{
+ struct json_ctx *ctx = state;
+
+ --ctx->nested;
+ return JSON_SUCCESS;
+}
+
+static JsonParseErrorType
+oauth_json_object_field_start(void *state, char *name, bool isnull)
+{
+ struct json_ctx *ctx = state;
+
+ /* Only top-level keys are considered. */
+ if (ctx->nested == 1)
+ {
+ if (strcmp(name, ERROR_STATUS_FIELD) == 0)
+ {
+ ctx->target_field_name = ERROR_STATUS_FIELD;
+ ctx->target_field = &ctx->status;
+ }
+ else if (strcmp(name, ERROR_SCOPE_FIELD) == 0)
+ {
+ ctx->target_field_name = ERROR_SCOPE_FIELD;
+ ctx->target_field = &ctx->scope;
+ }
+ else if (strcmp(name, ERROR_OPENID_CONFIGURATION_FIELD) == 0)
+ {
+ ctx->target_field_name = ERROR_OPENID_CONFIGURATION_FIELD;
+ ctx->target_field = &ctx->discovery_uri;
+ }
+ }
+
+ return JSON_SUCCESS;
+}
+
+static JsonParseErrorType
+oauth_json_array_start(void *state)
+{
+ struct json_ctx *ctx = state;
+
+ if (!ctx->nested)
+ {
+ ctx->errmsg = libpq_gettext("top-level element must be an object");
+ }
+ else if (ctx->target_field)
+ {
+ Assert(ctx->nested == 1);
+
+ oauth_json_set_error(ctx,
+ libpq_gettext("field \"%s\" must be a string"),
+ ctx->target_field_name);
+ }
+
+ return oauth_json_has_error(ctx) ? JSON_SEM_ACTION_FAILED : JSON_SUCCESS;
+}
+
+static JsonParseErrorType
+oauth_json_scalar(void *state, char *token, JsonTokenType type)
+{
+ struct json_ctx *ctx = state;
+
+ if (!ctx->nested)
+ {
+ ctx->errmsg = libpq_gettext("top-level element must be an object");
+ return JSON_SEM_ACTION_FAILED;
+ }
+
+ if (ctx->target_field)
+ {
+ if (ctx->nested != 1)
+ {
+ /*
+ * ctx->target_field should not have been set for nested keys.
+ * Assert in debug builds; in production builds, stop processing with
+ * an internal error.
+ */
+ Assert(false);
+ oauth_json_set_error(ctx,
+ "internal error: target scalar found at nesting level %d during OAUTHBEARER parsing",
+ ctx->nested);
+ return JSON_SEM_ACTION_FAILED;
+ }
+
+ /*
+ * We don't allow duplicate field names; error out if the target has
+ * already been set.
+ */
+ if (*ctx->target_field)
+ {
+ oauth_json_set_error(ctx,
+ libpq_gettext("field \"%s\" is duplicated"),
+ ctx->target_field_name);
+ return JSON_SEM_ACTION_FAILED;
+ }
+
+ /* The only fields we support are strings. */
+ if (type != JSON_TOKEN_STRING)
+ {
+ oauth_json_set_error(ctx,
+ libpq_gettext("field \"%s\" must be a string"),
+ ctx->target_field_name);
+ return JSON_SEM_ACTION_FAILED;
+ }
+
+ *ctx->target_field = strdup(token);
+ if (!*ctx->target_field)
+ return JSON_OUT_OF_MEMORY;
+
+ ctx->target_field = NULL;
+ ctx->target_field_name = NULL;
+ }
+ else
+ {
+ /* otherwise we just ignore it */
+ }
+
+ return JSON_SUCCESS;
+}
+
+#define HTTPS_SCHEME "https://"
+#define HTTP_SCHEME "http://"
+
+/* We support both well-known suffixes defined by RFC 8414. */
+#define WK_PREFIX "/.well-known/"
+#define OPENID_WK_SUFFIX "openid-configuration"
+#define OAUTH_WK_SUFFIX "oauth-authorization-server"
+
+/*
+ * Derives an issuer identifier from one of our recognized .well-known URIs,
+ * using the rules in RFC 8414.
+ */
+static char *
+issuer_from_well_known_uri(PGconn *conn, const char *wkuri)
+{
+ const char *authority_start = NULL;
+ const char *wk_start;
+ const char *wk_end;
+ char *issuer;
+ ptrdiff_t start_offset,
+ end_offset;
+ size_t end_len;
+
+ /*
+ * https:// is required for issuer identifiers (RFC 8414, Sec. 2; OIDC
+ * Discovery 1.0, Sec. 3). This is a case-insensitive comparison at this
+ * level (but issuer identifier comparison at the level above this is
+ * case-sensitive, so in practice it's probably moot).
+ */
+ if (pg_strncasecmp(wkuri, HTTPS_SCHEME, strlen(HTTPS_SCHEME)) == 0)
+ authority_start = wkuri + strlen(HTTPS_SCHEME);
+
+ if (!authority_start
+ && oauth_unsafe_debugging_enabled()
+ && pg_strncasecmp(wkuri, HTTP_SCHEME, strlen(HTTP_SCHEME)) == 0)
+ {
+ /* Allow http:// for testing only. */
+ authority_start = wkuri + strlen(HTTP_SCHEME);
+ }
+
+ if (!authority_start)
+ {
+ libpq_append_conn_error(conn,
+ "OAuth discovery URI \"%s\" must use HTTPS",
+ wkuri);
+ return NULL;
+ }
+
+ /*
+ * Well-known URIs in general may support queries and fragments, but the
+ * two types we support here do not. (They must be constructed from the
+ * components of issuer identifiers, which themselves may not contain any
+ * queries or fragments.)
+ *
+ * It's important to check this first, to avoid getting tricked later by a
+ * prefix buried inside a query or fragment.
+ */
+ if (strpbrk(authority_start, "?#") != NULL)
+ {
+ libpq_append_conn_error(conn,
+ "OAuth discovery URI \"%s\" must not contain query or fragment components",
+ wkuri);
+ return NULL;
+ }
+
+ /*
+ * Find the start of the .well-known prefix. IETF rules (RFC 8615) state
+ * this must be at the beginning of the path component, but OIDC defined
+ * it at the end instead (OIDC Discovery 1.0, Sec. 4), so we have to
+ * search for it anywhere.
+ */
+ wk_start = strstr(authority_start, WK_PREFIX);
+ if (!wk_start)
+ {
+ libpq_append_conn_error(conn,
+ "OAuth discovery URI \"%s\" is not a .well-known URI",
+ wkuri);
+ return NULL;
+ }
+
+ /*
+ * Now find the suffix type. We only support the two defined in OIDC
+ * Discovery 1.0 and RFC 8414.
+ */
+ wk_end = wk_start + strlen(WK_PREFIX);
+
+ if (strncmp(wk_end, OPENID_WK_SUFFIX, strlen(OPENID_WK_SUFFIX)) == 0)
+ wk_end += strlen(OPENID_WK_SUFFIX);
+ else if (strncmp(wk_end, OAUTH_WK_SUFFIX, strlen(OAUTH_WK_SUFFIX)) == 0)
+ wk_end += strlen(OAUTH_WK_SUFFIX);
+ else
+ wk_end = NULL;
+
+ /*
+ * Even if there's a match, we still need to check to make sure the suffix
+ * takes up the entire path segment, to weed out constructions like
+ * "/.well-known/openid-configuration-bad".
+ */
+ if (!wk_end || (*wk_end != '/' && *wk_end != '\0'))
+ {
+ libpq_append_conn_error(conn,
+ "OAuth discovery URI \"%s\" uses an unsupported .well-known suffix",
+ wkuri);
+ return NULL;
+ }
+
+ /*
+ * Finally, make sure the .well-known components are provided either as a
+ * prefix (IETF style) or as a postfix (OIDC style). In other words,
+ * "https://localhost/a/.well-known/openid-configuration/b" is not allowed
+ * to claim association with "https://localhost/a/b".
+ */
+ if (*wk_end != '\0')
+ {
+ /*
+ * It's not at the end, so it's required to be at the beginning of the
+ * path. Find the starting slash.
+ */
+ const char *path_start;
+
+ path_start = strchr(authority_start, '/');
+ Assert(path_start); /* otherwise we wouldn't have found WK_PREFIX */
+
+ if (wk_start != path_start)
+ {
+ libpq_append_conn_error(conn,
+ "OAuth discovery URI \"%s\" uses an invalid format",
+ wkuri);
+ return NULL;
+ }
+ }
+
+ /* Checks passed! Now build the issuer. */
+ issuer = strdup(wkuri);
+ if (!issuer)
+ {
+ libpq_append_conn_error(conn, "out of memory");
+ return NULL;
+ }
+
+ /*
+ * The .well-known components are from [wk_start, wk_end). Remove those to
+ * form the issuer ID, by shifting the path suffix (which may be empty)
+ * leftwards.
+ */
+ start_offset = wk_start - wkuri;
+ end_offset = wk_end - wkuri;
+ end_len = strlen(wk_end) + 1; /* move the NULL terminator too */
+
+ memmove(issuer + start_offset, issuer + end_offset, end_len);
+
+ return issuer;
+}
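+
+/*
+ * For example (hypothetical URIs): the OIDC-style
+ *
+ *     https://example.com/tenant/.well-known/openid-configuration
+ *
+ * and the IETF-style
+ *
+ *     https://example.com/.well-known/oauth-authorization-server/tenant
+ *
+ * both map to the issuer identifier "https://example.com/tenant".
+ */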
+
+/*
+ * Parses the server error result (RFC 7628, Sec. 3.2.2) contained in msg and
+ * stores any discovered openid_configuration and scope settings for the
+ * connection.
+ */
+static bool
+handle_oauth_sasl_error(PGconn *conn, const char *msg, int msglen)
+{
+ JsonLexContext lex = {0};
+ JsonSemAction sem = {0};
+ JsonParseErrorType err;
+ struct json_ctx ctx = {0};
+ char *errmsg = NULL;
+ bool success = false;
+
+ Assert(conn->oauth_issuer_id); /* ensured by setup_oauth_parameters() */
+
+ /* Sanity check. */
+ if (strlen(msg) != msglen)
+ {
+ libpq_append_conn_error(conn,
+ "server's error message contained an embedded NULL, and was discarded");
+ return false;
+ }
+
+ /*
+ * pg_parse_json doesn't validate the incoming UTF-8, so we have to check
+ * that up front.
+ */
+ if (pg_encoding_verifymbstr(PG_UTF8, msg, msglen) != msglen)
+ {
+ libpq_append_conn_error(conn,
+ "server's error response is not valid UTF-8");
+ return false;
+ }
+
+ makeJsonLexContextCstringLen(&lex, msg, msglen, PG_UTF8, true);
+ setJsonLexContextOwnsTokens(&lex, true); /* must not leak on error */
+
+ initPQExpBuffer(&ctx.errbuf);
+ sem.semstate = &ctx;
+
+ sem.object_start = oauth_json_object_start;
+ sem.object_end = oauth_json_object_end;
+ sem.object_field_start = oauth_json_object_field_start;
+ sem.array_start = oauth_json_array_start;
+ sem.scalar = oauth_json_scalar;
+
+ err = pg_parse_json(&lex, &sem);
+
+ if (err == JSON_SEM_ACTION_FAILED)
+ {
+ if (PQExpBufferDataBroken(ctx.errbuf))
+ errmsg = libpq_gettext("out of memory");
+ else if (ctx.errmsg)
+ errmsg = ctx.errmsg;
+ else
+ {
+ /*
+ * Developer error: one of the action callbacks didn't call
+ * oauth_json_set_error() before erroring out.
+ */
+ Assert(oauth_json_has_error(&ctx));
+ errmsg = "<unexpected empty error>";
+ }
+ }
+ else if (err != JSON_SUCCESS)
+ errmsg = json_errdetail(err, &lex);
+
+ if (errmsg)
+ libpq_append_conn_error(conn,
+ "failed to parse server's error response: %s",
+ errmsg);
+
+ /* Don't need the error buffer or the JSON lexer anymore. */
+ termPQExpBuffer(&ctx.errbuf);
+ freeJsonLexContext(&lex);
+
+ if (errmsg)
+ goto cleanup;
+
+ if (ctx.discovery_uri)
+ {
+ char *discovery_issuer;
+
+ /*
+ * The URI MUST correspond to our existing issuer, to avoid mix-ups.
+ *
+ * Issuer comparison is done byte-wise, rather than performing any URL
+ * normalization; this follows the suggestions for issuer comparison
+ * in RFC 9207 Sec. 2.4 (which requires simple string comparison) and
+ * vastly simplifies things. Since this is the key protection against
+ * a rogue server sending the client to an untrustworthy location,
+ * simpler is better.
+ */
+ discovery_issuer = issuer_from_well_known_uri(conn, ctx.discovery_uri);
+ if (!discovery_issuer)
+ goto cleanup; /* error message already set */
+
+ if (strcmp(conn->oauth_issuer_id, discovery_issuer) != 0)
+ {
+ libpq_append_conn_error(conn,
+ "server's discovery document at %s (issuer \"%s\") is incompatible with oauth_issuer (%s)",
+ ctx.discovery_uri, discovery_issuer,
+ conn->oauth_issuer_id);
+
+ free(discovery_issuer);
+ goto cleanup;
+ }
+
+ free(discovery_issuer);
+
+ if (!conn->oauth_discovery_uri)
+ {
+ conn->oauth_discovery_uri = ctx.discovery_uri;
+ ctx.discovery_uri = NULL;
+ }
+ else
+ {
+ /* This must match the URI we'd previously determined. */
+ if (strcmp(conn->oauth_discovery_uri, ctx.discovery_uri) != 0)
+ {
+ libpq_append_conn_error(conn,
+ "server's discovery document has moved to %s (previous location was %s)",
+ ctx.discovery_uri,
+ conn->oauth_discovery_uri);
+ goto cleanup;
+ }
+ }
+ }
+
+ if (ctx.scope)
+ {
+ /* Servers may not override a previously set oauth_scope. */
+ if (!conn->oauth_scope)
+ {
+ conn->oauth_scope = ctx.scope;
+ ctx.scope = NULL;
+ }
+ }
+
+ if (!ctx.status)
+ {
+ libpq_append_conn_error(conn,
+ "server sent error response without a status");
+ goto cleanup;
+ }
+
+ if (strcmp(ctx.status, "invalid_token") != 0)
+ {
+ /*
+ * invalid_token is the only error code we'll automatically retry for;
+ * otherwise, just bail out now.
+ */
+ libpq_append_conn_error(conn,
+ "server rejected OAuth bearer token: %s",
+ ctx.status);
+ goto cleanup;
+ }
+
+ success = true;
+
+cleanup:
+ free(ctx.status);
+ free(ctx.scope);
+ free(ctx.discovery_uri);
+
+ return success;
+}
+
+/*
+ * Callback implementation of conn->async_auth() for a user-defined OAuth flow.
+ * Delegates the retrieval of the token to the application's async callback.
+ *
+ * This will be called multiple times as needed; the application is responsible
+ * for providing an altsock for libpq to wait on and for returning the correct
+ * PGRES_POLLING_* statuses for use by PQconnectPoll().
+ */
+static PostgresPollingStatusType
+run_user_oauth_flow(PGconn *conn)
+{
+ fe_oauth_state *state = conn->sasl_state;
+ PGoauthBearerRequest *request = state->async_ctx;
+ PostgresPollingStatusType status;
+
+ if (!request->async)
+ {
+ libpq_append_conn_error(conn,
+ "user-defined OAuth flow provided neither a token nor an async callback");
+ return PGRES_POLLING_FAILED;
+ }
+
+ status = request->async(conn, request, &conn->altsock);
+ if (status == PGRES_POLLING_FAILED)
+ {
+ libpq_append_conn_error(conn, "user-defined OAuth flow failed");
+ return status;
+ }
+ else if (status == PGRES_POLLING_OK)
+ {
+ /*
+ * We already have a token, so copy it into the conn. (We can't hold
+ * onto the original string, since it may not be safe for us to free()
+ * it.)
+ */
+ if (!request->token)
+ {
+ libpq_append_conn_error(conn,
+ "user-defined OAuth flow did not provide a token");
+ return PGRES_POLLING_FAILED;
+ }
+
+ conn->oauth_token = strdup(request->token);
+ if (!conn->oauth_token)
+ {
+ libpq_append_conn_error(conn, "out of memory");
+ return PGRES_POLLING_FAILED;
+ }
+
+ return PGRES_POLLING_OK;
+ }
+
+ /* The hook wants the client to poll the altsock. Make sure it set one. */
+ if (conn->altsock == PGINVALID_SOCKET)
+ {
+ libpq_append_conn_error(conn,
+ "user-defined OAuth flow did not provide a socket for polling");
+ return PGRES_POLLING_FAILED;
+ }
+
+ return status;
+}
+
+/*
+ * Cleanup callback for the async user flow. Delegates most of its job to the
+ * user-provided cleanup implementation, then disconnects the altsock.
+ */
+static void
+cleanup_user_oauth_flow(PGconn *conn)
+{
+ fe_oauth_state *state = conn->sasl_state;
+ PGoauthBearerRequest *request = state->async_ctx;
+
+ Assert(request);
+
+ if (request->cleanup)
+ request->cleanup(conn, request);
+ conn->altsock = PGINVALID_SOCKET;
+
+ free(request);
+ state->async_ctx = NULL;
+}
+
+/*
+ * Chooses an OAuth client flow for the connection, which will retrieve a Bearer
+ * token for presentation to the server.
+ *
+ * If the application has registered a custom flow handler using
+ * PQAUTHDATA_OAUTH_BEARER_TOKEN, it may either return a token immediately (e.g.
+ * if it has one cached for immediate use), or set up for a series of
+ * asynchronous callbacks which will be managed by run_user_oauth_flow().
+ *
+ * If the default handler is used instead, a Device Authorization flow is used
+ * for the connection if support has been compiled in. (See
+ * fe-auth-oauth-curl.c for implementation details.)
+ *
+ * If neither a custom handler nor the builtin flow is available, the connection
+ * fails here.
+ */
+static bool
+setup_token_request(PGconn *conn, fe_oauth_state *state)
+{
+ int res;
+ PGoauthBearerRequest request = {
+ .openid_configuration = conn->oauth_discovery_uri,
+ .scope = conn->oauth_scope,
+ };
+
+ Assert(request.openid_configuration);
+
+ /* The client may have overridden the OAuth flow. */
+ res = PQauthDataHook(PQAUTHDATA_OAUTH_BEARER_TOKEN, conn, &request);
+ if (res > 0)
+ {
+ PGoauthBearerRequest *request_copy;
+
+ if (request.token)
+ {
+ /*
+ * We already have a token, so copy it into the conn. (We can't
+ * hold onto the original string, since it may not be safe for us
+ * to free() it.)
+ */
+ conn->oauth_token = strdup(request.token);
+ if (!conn->oauth_token)
+ {
+ libpq_append_conn_error(conn, "out of memory");
+ goto fail;
+ }
+
+ /* short-circuit */
+ if (request.cleanup)
+ request.cleanup(conn, &request);
+ return true;
+ }
+
+ request_copy = malloc(sizeof(*request_copy));
+ if (!request_copy)
+ {
+ libpq_append_conn_error(conn, "out of memory");
+ goto fail;
+ }
+
+ memcpy(request_copy, &request, sizeof(request));
+
+ conn->async_auth = run_user_oauth_flow;
+ conn->cleanup_async_auth = cleanup_user_oauth_flow;
+ state->async_ctx = request_copy;
+ }
+ else if (res < 0)
+ {
+ libpq_append_conn_error(conn, "user-defined OAuth flow failed");
+ goto fail;
+ }
+ else
+ {
+#if USE_LIBCURL
+ /* Hand off to our built-in OAuth flow. */
+ conn->async_auth = pg_fe_run_oauth_flow;
+ conn->cleanup_async_auth = pg_fe_cleanup_oauth_flow;
+
+#else
+ libpq_append_conn_error(conn, "no custom OAuth flows are available, and libpq was not built with libcurl support");
+ goto fail;
+
+#endif
+ }
+
+ return true;
+
+fail:
+ if (request.cleanup)
+ request.cleanup(conn, &request);
+ return false;
+}
+
+/*
+ * Fill in our issuer identifier (and discovery URI, if possible) using the
+ * connection parameters. If conn->oauth_discovery_uri can't be populated in
+ * this function, it will be requested from the server.
+ */
+static bool
+setup_oauth_parameters(PGconn *conn)
+{
+ /*
+ * This is the only function that sets conn->oauth_issuer_id. If a
+ * previous connection attempt has already computed it, don't overwrite it
+ * or the discovery URI. (There's no reason for them to change once
+ * they're set, and handle_oauth_sasl_error() will fail the connection if
+ * the server attempts to switch them on us later.)
+ */
+ if (conn->oauth_issuer_id)
+ return true;
+
+ /*---
+ * To talk to a server, we require the user to provide issuer and client
+ * identifiers.
+ *
+ * While it's possible for an OAuth client to support multiple issuers, it
+ * requires additional effort to make sure the flows in use are safe -- to
+ * quote RFC 9207,
+ *
+ * OAuth clients that interact with only one authorization server are
+ * not vulnerable to mix-up attacks. However, when such clients decide
+ * to add support for a second authorization server in the future, they
+ * become vulnerable and need to apply countermeasures to mix-up
+ * attacks.
+ *
+ * For now, we allow only one.
+ */
+ if (!conn->oauth_issuer || !conn->oauth_client_id)
+ {
+ libpq_append_conn_error(conn,
+ "server requires OAuth authentication, but oauth_issuer and oauth_client_id are not both set");
+ return false;
+ }
+
+ /*
+ * oauth_issuer is interpreted differently if it's a well-known discovery
+ * URI rather than just an issuer identifier.
+ */
+ if (strstr(conn->oauth_issuer, WK_PREFIX) != NULL)
+ {
+ /*
+ * Convert the URI back to an issuer identifier. (This also performs
+ * validation of the URI format.)
+ */
+ conn->oauth_issuer_id = issuer_from_well_known_uri(conn,
+ conn->oauth_issuer);
+ if (!conn->oauth_issuer_id)
+ return false; /* error message already set */
+
+ conn->oauth_discovery_uri = strdup(conn->oauth_issuer);
+ if (!conn->oauth_discovery_uri)
+ {
+ libpq_append_conn_error(conn, "out of memory");
+ return false;
+ }
+ }
+ else
+ {
+ /*
+ * Treat oauth_issuer as an issuer identifier. We'll ask the server
+ * for the discovery URI.
+ */
+ conn->oauth_issuer_id = strdup(conn->oauth_issuer);
+ if (!conn->oauth_issuer_id)
+ {
+ libpq_append_conn_error(conn, "out of memory");
+ return false;
+ }
+ }
+
+ return true;
+}
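+
+/*
+ * For example (hypothetical settings): a connection string may specify either
+ * the bare issuer identifier,
+ *
+ *     oauth_issuer=https://example.com oauth_client_id=myapp
+ *
+ * in which case the discovery URI is requested from the server, or the
+ * well-known discovery URI itself,
+ *
+ *     oauth_issuer=https://example.com/.well-known/openid-configuration
+ *
+ * in which case the issuer identifier is derived from it as described above.
+ */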
+
+/*
+ * Implements the OAUTHBEARER SASL exchange (RFC 7628, Sec. 3.2).
+ *
+ * If the necessary OAuth parameters are set up on the connection, this will run
+ * the client flow asynchronously and present the resulting token to the server.
+ * Otherwise, an empty discovery response will be sent and any parameters sent
+ * back by the server will be stored for a second attempt.
+ *
+ * For a full description of the API, see libpq/sasl.h.
+ */
+static SASLStatus
+oauth_exchange(void *opaq, bool final,
+ char *input, int inputlen,
+ char **output, int *outputlen)
+{
+ fe_oauth_state *state = opaq;
+ PGconn *conn = state->conn;
+ bool discover = false;
+
+ *output = NULL;
+ *outputlen = 0;
+
+ switch (state->step)
+ {
+ case FE_OAUTH_INIT:
+ /* We begin in the initial response phase. */
+ Assert(inputlen == -1);
+
+ if (!setup_oauth_parameters(conn))
+ return SASL_FAILED;
+
+ if (conn->oauth_token)
+ {
+ /*
+ * A previous connection already fetched the token; we'll use
+ * it below.
+ */
+ }
+ else if (conn->oauth_discovery_uri)
+ {
+ /*
+ * We don't have a token, but we have a discovery URI already
+ * stored. Decide whether we're using a user-provided OAuth
+ * flow or the one we have built in.
+ */
+ if (!setup_token_request(conn, state))
+ return SASL_FAILED;
+
+ if (conn->oauth_token)
+ {
+ /*
+ * A really smart user implementation may have already
+ * given us the token (e.g. if there was an unexpired copy
+ * already cached), and we can use it immediately.
+ */
+ }
+ else
+ {
+ /*
+ * Otherwise, we'll have to hand the connection over to
+ * our OAuth implementation.
+ *
+ * This could take a while, since it generally involves a
+ * user in the loop. To avoid consuming the server's
+ * authentication timeout, we'll continue this handshake
+ * to the end, so that the server can close its side of
+ * the connection. We'll open a second connection later
+ * once we've retrieved a token.
+ */
+ discover = true;
+ }
+ }
+ else
+ {
+ /*
+ * If we don't have a token, and we don't have a discovery URI
+ * to be able to request a token, we ask the server for one
+ * explicitly.
+ */
+ discover = true;
+ }
+
+ /*
+ * Generate an initial response. This either contains a token, if
+ * we have one, or an empty discovery response which is doomed to
+ * fail.
+ */
+ *output = client_initial_response(conn, discover);
+ if (!*output)
+ return SASL_FAILED;
+
+ *outputlen = strlen(*output);
+ state->step = FE_OAUTH_BEARER_SENT;
+
+ if (conn->oauth_token)
+ {
+ /*
+ * For the purposes of require_auth, our side of
+ * authentication is done at this point; the server will
+ * either accept the connection or send an error. Unlike
+ * SCRAM, there is no additional server data to check upon
+ * success.
+ */
+ conn->client_finished_auth = true;
+ }
+
+ return SASL_CONTINUE;
+
+ case FE_OAUTH_BEARER_SENT:
+ if (final)
+ {
+ /*
+ * OAUTHBEARER does not make use of additional data with a
+ * successful SASL exchange, so we shouldn't get an
+ * AuthenticationSASLFinal message.
+ */
+ libpq_append_conn_error(conn,
+ "server sent unexpected additional OAuth data");
+ return SASL_FAILED;
+ }
+
+ /*
+ * An error message was sent by the server. Respond with the
+ * required dummy message (RFC 7628, Sec. 3.2.3).
+ */
+ *output = strdup(kvsep);
+ if (unlikely(!*output))
+ {
+ libpq_append_conn_error(conn, "out of memory");
+ return SASL_FAILED;
+ }
+ *outputlen = strlen(*output); /* == 1 */
+
+ /* Grab the settings from discovery. */
+ if (!handle_oauth_sasl_error(conn, input, inputlen))
+ return SASL_FAILED;
+
+ if (conn->oauth_token)
+ {
+ /*
+ * The server rejected our token. Continue onwards towards the
+ * expected FATAL message, but mark our state to catch any
+ * unexpected "success" from the server.
+ */
+ state->step = FE_OAUTH_SERVER_ERROR;
+ return SASL_CONTINUE;
+ }
+
+ if (!conn->async_auth)
+ {
+ /*
+ * No OAuth flow is set up yet. Did we get enough information
+ * from the server to create one?
+ */
+ if (!conn->oauth_discovery_uri)
+ {
+ libpq_append_conn_error(conn,
+ "server requires OAuth authentication, but no discovery metadata was provided");
+ return SASL_FAILED;
+ }
+
+ /* Yes. Set up the flow now. */
+ if (!setup_token_request(conn, state))
+ return SASL_FAILED;
+
+ if (conn->oauth_token)
+ {
+ /*
+ * A token was available in a custom flow's cache. Skip
+ * the asynchronous processing.
+ */
+ goto reconnect;
+ }
+ }
+
+ /*
+ * Time to retrieve a token. This involves a number of HTTP
+ * connections and timed waits, so we escape the synchronous auth
+ * processing and tell PQconnectPoll to transfer control to our
+ * async implementation.
+ */
+ Assert(conn->async_auth); /* should have been set already */
+ state->step = FE_OAUTH_REQUESTING_TOKEN;
+ return SASL_ASYNC;
+
+ case FE_OAUTH_REQUESTING_TOKEN:
+
+ /*
+ * We've returned successfully from token retrieval. Double-check
+ * that we have what we need for the next connection.
+ */
+ if (!conn->oauth_token)
+ {
+ Assert(false); /* should have failed before this point! */
+ libpq_append_conn_error(conn,
+ "internal error: OAuth flow did not set a token");
+ return SASL_FAILED;
+ }
+
+ goto reconnect;
+
+ case FE_OAUTH_SERVER_ERROR:
+
+ /*
+ * After an error, the server should send an error response to
+ * fail the SASL handshake, which is handled in higher layers.
+ *
+ * If we get here, the server either sent *another* challenge
+ * which isn't defined in the RFC, or completed the handshake
+ * successfully after telling us it was going to fail. Neither is
+ * acceptable.
+ */
+ libpq_append_conn_error(conn,
+ "server sent additional OAuth data after error");
+ return SASL_FAILED;
+
+ default:
+ libpq_append_conn_error(conn, "invalid OAuth exchange state");
+ break;
+ }
+
+ Assert(false); /* should never get here */
+ return SASL_FAILED;
+
+reconnect:
+
+ /*
+ * Despite being a failure from the point of view of SASL, we have enough
+ * information to restart with a new connection.
+ */
+ libpq_append_conn_error(conn, "retrying connection with new bearer token");
+ conn->oauth_want_retry = true;
+ return SASL_FAILED;
+}
+
+static bool
+oauth_channel_bound(void *opaq)
+{
+ /* This mechanism does not support channel binding. */
+ return false;
+}
+
+/*
+ * Fully clears out any stored OAuth token. This is done proactively upon
+ * successful connection as well as during pqClosePGconn().
+ */
+void
+pqClearOAuthToken(PGconn *conn)
+{
+ if (!conn->oauth_token)
+ return;
+
+ explicit_bzero(conn->oauth_token, strlen(conn->oauth_token));
+ free(conn->oauth_token);
+ conn->oauth_token = NULL;
+}
+
+/*
+ * Returns true if the PGOAUTHDEBUG=UNSAFE flag is set in the environment.
+ */
+bool
+oauth_unsafe_debugging_enabled(void)
+{
+ const char *env = getenv("PGOAUTHDEBUG");
+
+ return (env && strcmp(env, "UNSAFE") == 0);
+}
--- /dev/null
+/*-------------------------------------------------------------------------
+ *
+ * fe-auth-oauth.h
+ *
+ * Definitions for OAuth authentication implementations
+ *
+ * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/interfaces/libpq/fe-auth-oauth.h
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#ifndef FE_AUTH_OAUTH_H
+#define FE_AUTH_OAUTH_H
+
+#include "libpq-fe.h"
+#include "libpq-int.h"
+
+
+enum fe_oauth_step
+{
+ FE_OAUTH_INIT,
+ FE_OAUTH_BEARER_SENT,
+ FE_OAUTH_REQUESTING_TOKEN,
+ FE_OAUTH_SERVER_ERROR,
+};
+
+typedef struct
+{
+ enum fe_oauth_step step;
+
+ PGconn *conn;
+ void *async_ctx;
+} fe_oauth_state;
+
+extern PostgresPollingStatusType pg_fe_run_oauth_flow(PGconn *conn);
+extern void pg_fe_cleanup_oauth_flow(PGconn *conn);
+extern void pqClearOAuthToken(PGconn *conn);
+extern bool oauth_unsafe_debugging_enabled(void);
+
+/* Mechanisms in fe-auth-oauth.c */
+extern const pg_fe_sasl_mech pg_oauth_mech;
+
+#endif /* FE_AUTH_OAUTH_H */
#endif
#include "common/md5.h"
+#include "common/oauth-common.h"
#include "common/scram-common.h"
#include "fe-auth.h"
#include "fe-auth-sasl.h"
+#include "fe-auth-oauth.h"
#include "libpq-fe.h"
#ifdef ENABLE_GSS
conn->sasl = &pg_scram_mech;
conn->password_needed = true;
}
+ else if (strcmp(mechanism_buf.data, OAUTHBEARER_NAME) == 0 &&
+ !selected_mechanism)
+ {
+ selected_mechanism = OAUTHBEARER_NAME;
+ conn->sasl = &pg_oauth_mech;
+ conn->password_needed = false;
+ }
}
if (!selected_mechanism)
if (!allowed)
{
- /*
- * TODO: this is dead code until a second SASL mechanism is added;
- * the connection can't have proceeded past check_expected_areq()
- * if no SASL methods are allowed.
- */
- Assert(false);
-
libpq_append_conn_error(conn, "authentication method requirement \"%s\" failed: server requested %s authentication",
conn->require_auth, selected_mechanism);
goto error;
}
}
}
+
+PQauthDataHook_type PQauthDataHook = PQdefaultAuthDataHook;
+
+PQauthDataHook_type
+PQgetAuthDataHook(void)
+{
+ return PQauthDataHook;
+}
+
+void
+PQsetAuthDataHook(PQauthDataHook_type hook)
+{
+ PQauthDataHook = hook ? hook : PQdefaultAuthDataHook;
+}
+
+int
+PQdefaultAuthDataHook(PGauthData type, PGconn *conn, void *data)
+{
+ return 0; /* handle nothing */
+}
#include "libpq-int.h"
+extern PQauthDataHook_type PQauthDataHook;
+
+
/* Prototypes for functions in fe-auth.c */
extern int pg_fe_sendauth(AuthRequest areq, int payloadlen, PGconn *conn,
bool *async);
#include "common/scram-common.h"
#include "common/string.h"
#include "fe-auth.h"
+#include "fe-auth-oauth.h"
#include "libpq-fe.h"
#include "libpq-int.h"
#include "mb/pg_wchar.h"
{"scram_server_key", NULL, NULL, NULL, "SCRAM-Server-Key", "D", SCRAM_MAX_KEY_LEN * 2,
offsetof(struct pg_conn, scram_server_key)},
+ /* OAuth v2 */
+ {"oauth_issuer", NULL, NULL, NULL,
+ "OAuth-Issuer", "", 40,
+ offsetof(struct pg_conn, oauth_issuer)},
+
+ {"oauth_client_id", NULL, NULL, NULL,
+ "OAuth-Client-ID", "", 40,
+ offsetof(struct pg_conn, oauth_client_id)},
+
+ {"oauth_client_secret", NULL, NULL, NULL,
+ "OAuth-Client-Secret", "", 40,
+ offsetof(struct pg_conn, oauth_client_secret)},
+
+ {"oauth_scope", NULL, NULL, NULL,
+ "OAuth-Scope", "", 15,
+ offsetof(struct pg_conn, oauth_scope)},
+
/* Terminating entry --- MUST BE LAST */
{NULL, NULL, NULL, NULL,
NULL, NULL, 0}
static const pg_fe_sasl_mech *supported_sasl_mechs[] =
{
&pg_scram_mech,
+ &pg_oauth_mech,
};
#define SASL_MECHANISM_COUNT lengthof(supported_sasl_mechs)
conn->write_failed = false;
free(conn->write_err_msg);
conn->write_err_msg = NULL;
+ conn->oauth_want_retry = false;
/*
* Cancel connections need to retain their be_pid and be_key across
fill_allowed_sasl_mechs(PGconn *conn)
{
/*---
- * We only support one mechanism at the moment, so rather than deal with a
+ * We only support two mechanisms at the moment, so rather than deal with a
* linked list, conn->allowed_sasl_mechs is an array of static length. We
* rely on the compile-time assertion here to keep us honest.
*
{
mech = &pg_scram_mech;
}
+ else if (strcmp(method, "oauth") == 0)
+ {
+ mech = &pg_oauth_mech;
+ }
/*
* Final group: meta-options.
conn->inStart = conn->inCursor;
if (res != STATUS_OK)
+ {
+ /*
+ * OAuth connections may perform two-step discovery, where
+ * the first connection is a dummy.
+ */
+ if (conn->sasl == &pg_oauth_mech && conn->oauth_want_retry)
+ {
+ need_new_connection = true;
+ goto keep_going;
+ }
+
goto error_return;
+ }
/*
* Just make sure that any data sent by pg_fe_sendauth is
}
}
+ /* Don't hold onto any OAuth tokens longer than necessary. */
+ pqClearOAuthToken(conn);
+
/*
* For non cancel requests we can release the address list
* now. For cancel requests we never actually resolve
free(conn->load_balance_hosts);
free(conn->scram_client_key);
free(conn->scram_server_key);
+ free(conn->oauth_issuer);
+ free(conn->oauth_issuer_id);
+ free(conn->oauth_discovery_uri);
+ free(conn->oauth_client_id);
+ free(conn->oauth_client_secret);
+ free(conn->oauth_scope);
termPQExpBuffer(&conn->errorMessage);
termPQExpBuffer(&conn->workBuffer);
conn->asyncStatus = PGASYNC_IDLE;
conn->xactStatus = PQTRANS_IDLE;
conn->pipelineStatus = PQ_PIPELINE_OFF;
+ pqClearOAuthToken(conn);
pqClearAsyncResult(conn); /* deallocate result */
pqClearConnErrorState(conn);
/* Features added in PostgreSQL v18: */
/* Indicates presence of PQfullProtocolVersion */
#define LIBPQ_HAS_FULL_PROTOCOL_VERSION 1
+/* Indicates presence of the PQAUTHDATA_PROMPT_OAUTH_DEVICE authdata hook */
+#define LIBPQ_HAS_PROMPT_OAUTH_DEVICE 1
/*
* Option flags for PQcopyResult
PQ_PIPELINE_ABORTED
} PGpipelineStatus;
+typedef enum
+{
+ PQAUTHDATA_PROMPT_OAUTH_DEVICE, /* user must visit a device-authorization
+ * URL */
+ PQAUTHDATA_OAUTH_BEARER_TOKEN, /* server requests an OAuth Bearer token */
+} PGauthData;
+
/* PGconn encapsulates a connection to the backend.
* The contents of this struct are not supposed to be known to applications.
*/
/* === in fe-auth.c === */
+typedef struct _PGpromptOAuthDevice
+{
+ const char *verification_uri; /* verification URI to visit */
+ const char *user_code; /* user code to enter */
+ const char *verification_uri_complete; /* optional combination of URI and
+ * code, or NULL */
+ int expires_in; /* seconds until user code expires */
+} PGpromptOAuthDevice;
+
+/* for PGoauthBearerRequest.async() */
+#ifdef _WIN32
+#define SOCKTYPE uintptr_t /* avoids depending on winsock2.h for SOCKET */
+#else
+#define SOCKTYPE int
+#endif
+
+typedef struct _PGoauthBearerRequest
+{
+ /* Hook inputs (constant across all calls) */
+ const char *const openid_configuration; /* OIDC discovery URI */
+ const char *const scope; /* required scope(s), or NULL */
+
+ /* Hook outputs */
+
+ /*---------
+ * Callback implementing a custom asynchronous OAuth flow.
+ *
+ * The callback may return
+ * - PGRES_POLLING_READING/WRITING, to indicate that a socket descriptor
+ * has been stored in *altsock and libpq should wait until it is
+ * readable or writable before calling back;
+ * - PGRES_POLLING_OK, to indicate that the flow is complete and
+ * request->token has been set; or
+ * - PGRES_POLLING_FAILED, to indicate that token retrieval has failed.
+ *
+ * This callback is optional. If the token can be obtained without
+ * blocking during the original call to the PQAUTHDATA_OAUTH_BEARER_TOKEN
+ * hook, it may be returned directly, but one of request->async or
+ * request->token must be set by the hook.
+ */
+ PostgresPollingStatusType (*async) (PGconn *conn,
+ struct _PGoauthBearerRequest *request,
+ SOCKTYPE * altsock);
+
+ /*
+ * Callback to clean up custom allocations. A hook implementation may use
+ * this to free request->token and any resources in request->user.
+ *
+ * This is technically optional, but highly recommended, because there is
+ * no other indication as to when it is safe to free the token.
+ */
+ void (*cleanup) (PGconn *conn, struct _PGoauthBearerRequest *request);
+
+ /*
+ * The hook should set this to the Bearer token contents for the
+ * connection, once the flow is completed. The token contents must remain
+ * available to libpq until the hook's cleanup callback is called.
+ */
+ char *token;
+
+ /*
+ * Hook-defined data. libpq will not modify this pointer across calls to
+ * the async callback, so it can be used to keep track of
+ * application-specific state. Resources allocated here should be freed by
+ * the cleanup callback.
+ */
+ void *user;
+} PGoauthBearerRequest;
+
+#undef SOCKTYPE
+
extern char *PQencryptPassword(const char *passwd, const char *user);
extern char *PQencryptPasswordConn(PGconn *conn, const char *passwd, const char *user, const char *algorithm);
extern PGresult *PQchangePassword(PGconn *conn, const char *user, const char *passwd);
+typedef int (*PQauthDataHook_type) (PGauthData type, PGconn *conn, void *data);
+extern void PQsetAuthDataHook(PQauthDataHook_type hook);
+extern PQauthDataHook_type PQgetAuthDataHook(void);
+extern int PQdefaultAuthDataHook(PGauthData type, PGconn *conn, void *data);
+
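+/*
+ * Illustrative sketch only ("my_auth_data_hook" and "my_token" are
+ * hypothetical names, not part of libpq): an application that can produce a
+ * bearer token without blocking may handle PQAUTHDATA_OAUTH_BEARER_TOKEN
+ * directly and return 1, returning 0 for any other PGauthData type so that
+ * libpq's default handling still applies. If the token is allocated
+ * dynamically, a cleanup callback should be installed as well, so the
+ * application knows when it is safe to free it (see PGoauthBearerRequest).
+ *
+ *     static int
+ *     my_auth_data_hook(PGauthData type, PGconn *conn, void *data)
+ *     {
+ *         PGoauthBearerRequest *request = data;
+ *
+ *         if (type != PQAUTHDATA_OAUTH_BEARER_TOKEN)
+ *             return 0;
+ *
+ *         request->token = my_token;
+ *         return 1;
+ *     }
+ *
+ *     PQsetAuthDataHook(my_auth_data_hook);
+ *
+ * Flows that need to wait (network requests, user interaction) should instead
+ * set request->async; oauth_hook_client.c in src/test/modules/oauth_validator
+ * exercises both styles.
+ */
+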
/* === in encnames.c === */
extern int pg_char_to_encoding(const char *name);
* cancel request, instead of being a normal
* connection that's used for queries */
+ /* OAuth v2 */
+ char *oauth_issuer; /* token issuer/URL */
+ char *oauth_issuer_id; /* token issuer identifier */
+ char *oauth_discovery_uri; /* URI of the issuer's discovery
+ * document */
+ char *oauth_client_id; /* client identifier */
+ char *oauth_client_secret; /* client secret */
+ char *oauth_scope; /* access token scope */
+ char *oauth_token; /* access token */
+ bool oauth_want_retry; /* should we retry on failure? */
+
/* Optional file to write trace info to */
FILE *Pfdebug;
int traceFlags;
* the server? */
uint32 allowed_auth_methods; /* bitmask of acceptable AuthRequest
* codes */
- const pg_fe_sasl_mech *allowed_sasl_mechs[1]; /* and acceptable SASL
+ const pg_fe_sasl_mech *allowed_sasl_mechs[2]; /* and acceptable SASL
* mechanisms */
bool client_finished_auth; /* have we finished our half of the
* authentication exchange? */
# Copyright (c) 2022-2025, PostgreSQL Global Development Group
libpq_sources = files(
+ 'fe-auth-oauth.c',
'fe-auth-scram.c',
'fe-auth.c',
'fe-cancel.c',
)
endif
+if libcurl.found()
+ libpq_sources += files('fe-auth-oauth-curl.c')
+endif
+
export_file = custom_target('libpq.exports',
kwargs: gen_export_kwargs,
)
'gssapi': gssapi,
'icu': icu,
'ldap': ldap,
+ 'libcurl': libcurl,
'libxml': libxml,
'libxslt': libxslt,
'llvm': llvm,
$node->connect_fails(
"user=scram_role require_auth=!scram-sha-256",
"SCRAM authentication forbidden, fails with SCRAM auth",
- expected_stderr => qr/server requested SASL authentication/);
+ expected_stderr => qr/server requested SCRAM-SHA-256 authentication/);
$node->connect_fails(
"user=scram_role require_auth=!password,!md5,!scram-sha-256",
"multiple authentication types forbidden, fails with SCRAM auth",
- expected_stderr => qr/server requested SASL authentication/);
+ expected_stderr => qr/server requested SCRAM-SHA-256 authentication/);
# Test that bad passwords are rejected.
$ENV{"PGPASSWORD"} = 'badpass';
"user=scram_role require_auth=!scram-sha-256",
"password authentication forbidden, fails with SCRAM auth",
expected_stderr =>
- qr/authentication method requirement "!scram-sha-256" failed: server requested SASL authentication/
+ qr/authentication method requirement "!scram-sha-256" failed: server requested SCRAM-SHA-256 authentication/
);
$node->connect_fails(
"user=scram_role require_auth=!password,!md5,!scram-sha-256",
"multiple authentication types forbidden, fails with SCRAM auth",
expected_stderr =>
- qr/authentication method requirement "!password,!md5,!scram-sha-256" failed: server requested SASL authentication/
+ qr/authentication method requirement "!password,!md5,!scram-sha-256" failed: server requested SCRAM-SHA-256 authentication/
);
# Test SYSTEM_USER <> NULL with parallel workers.
dummy_index_am \
dummy_seclabel \
libpq_pipeline \
+ oauth_validator \
plsample \
spgist_name_ops \
test_bloomfilter \
subdir('injection_points')
subdir('ldap_password_func')
subdir('libpq_pipeline')
+subdir('oauth_validator')
subdir('plsample')
subdir('spgist_name_ops')
subdir('ssl_passphrase_callback')
--- /dev/null
+# Generated subdirectories
+/log/
+/results/
+/tmp_check/
--- /dev/null
+#-------------------------------------------------------------------------
+#
+# Makefile for src/test/modules/oauth_validator
+#
+# Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
+# Portions Copyright (c) 1994, Regents of the University of California
+#
+# src/test/modules/oauth_validator/Makefile
+#
+#-------------------------------------------------------------------------
+
+MODULES = validator fail_validator magic_validator
+PGFILEDESC = "validator - test OAuth validator module"
+
+PROGRAM = oauth_hook_client
+PGAPPICON = win32
+OBJS = $(WIN32RES) oauth_hook_client.o
+
+PG_CPPFLAGS = -I$(libpq_srcdir)
+PG_LIBS_INTERNAL += $(libpq_pgport)
+
+NO_INSTALLCHECK = 1
+
+TAP_TESTS = 1
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = src/test/modules/oauth_validator
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+
+export PYTHON
+export with_libcurl
+export with_python
+
+endif
--- /dev/null
+Test programs and libraries for OAuth
+-------------------------------------
+
+This folder contains tests for the client- and server-side OAuth
+implementations. Most tests are run end-to-end to test both simultaneously. The
+tests in t/001_server use a mock OAuth authorization server, implemented jointly
+by t/OAuth/Server.pm and t/oauth_server.py, to run the libpq Device
+Authorization flow. The tests in t/002_client exercise custom OAuth flows and
+don't need an authorization server.
+
+Tests in this folder require 'oauth' to be present in PG_TEST_EXTRA, since
+an HTTP server listening on a localhost TCP/IP socket will be started. A
+Python installation is required to run the mock authorization server.
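+
+For example, assuming an in-tree source build (exact targets and suite names
+may vary with your build setup), the tests might be run with:
+
+    export PG_TEST_EXTRA=oauth
+    make -C src/test/modules/oauth_validator check
+
+or, from a Meson build directory named "build":
+
+    meson test -C build --suite setup --suite oauth_validator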
--- /dev/null
+/*-------------------------------------------------------------------------
+ *
+ * fail_validator.c
+ * Test module for server-side OAuth token validation callbacks, whose
+ * validation callback is guaranteed to always fail
+ *
+ * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/test/modules/oauth_validator/fail_validator.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "fmgr.h"
+#include "libpq/oauth.h"
+
+PG_MODULE_MAGIC;
+
+static bool fail_token(const ValidatorModuleState *state,
+ const char *token,
+ const char *role,
+ ValidatorModuleResult *result);
+
+/* Callback implementations (we only need the main one) */
+static const OAuthValidatorCallbacks validator_callbacks = {
+ PG_OAUTH_VALIDATOR_MAGIC,
+
+ .validate_cb = fail_token,
+};
+
+const OAuthValidatorCallbacks *
+_PG_oauth_validator_module_init(void)
+{
+ return &validator_callbacks;
+}
+
+static bool
+fail_token(const ValidatorModuleState *state,
+ const char *token, const char *role,
+ ValidatorModuleResult *res)
+{
+ elog(FATAL, "fail_validator: sentinel error");
+ pg_unreachable();
+}
--- /dev/null
+/*-------------------------------------------------------------------------
+ *
+ * magic_validator.c
+ * Test module for server-side OAuth token validation callbacks, which
+ * should fail due to using the wrong PG_OAUTH_VALIDATOR_MAGIC marker
+ * and thus the wrong ABI version
+ *
+ * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/test/modules/oauth_validator/magic_validator.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "fmgr.h"
+#include "libpq/oauth.h"
+
+PG_MODULE_MAGIC;
+
+static bool validate_token(const ValidatorModuleState *state,
+ const char *token,
+ const char *role,
+ ValidatorModuleResult *result);
+
+/* Callback implementations (we only need the main one) */
+static const OAuthValidatorCallbacks validator_callbacks = {
+ 0xdeadbeef,
+
+ .validate_cb = validate_token,
+};
+
+const OAuthValidatorCallbacks *
+_PG_oauth_validator_module_init(void)
+{
+ return &validator_callbacks;
+}
+
+static bool
+validate_token(const ValidatorModuleState *state,
+ const char *token, const char *role,
+ ValidatorModuleResult *res)
+{
+ elog(FATAL, "magic_validator: this should be unreachable");
+ pg_unreachable();
+}
--- /dev/null
+# Copyright (c) 2025, PostgreSQL Global Development Group
+
+validator_sources = files(
+ 'validator.c',
+)
+
+if host_system == 'windows'
+ validator_sources += rc_lib_gen.process(win32ver_rc, extra_args: [
+ '--NAME', 'validator',
+ '--FILEDESC', 'validator - test OAuth validator module',])
+endif
+
+validator = shared_module('validator',
+ validator_sources,
+ kwargs: pg_test_mod_args,
+)
+test_install_libs += validator
+
+fail_validator_sources = files(
+ 'fail_validator.c',
+)
+
+if host_system == 'windows'
+ fail_validator_sources += rc_lib_gen.process(win32ver_rc, extra_args: [
+ '--NAME', 'fail_validator',
+ '--FILEDESC', 'fail_validator - failing OAuth validator module',])
+endif
+
+fail_validator = shared_module('fail_validator',
+ fail_validator_sources,
+ kwargs: pg_test_mod_args,
+)
+test_install_libs += fail_validator
+
+magic_validator_sources = files(
+ 'magic_validator.c',
+)
+
+if host_system == 'windows'
+ magic_validator_sources += rc_lib_gen.process(win32ver_rc, extra_args: [
+ '--NAME', 'magic_validator',
+ '--FILEDESC', 'magic_validator - ABI incompatible OAuth validator module',])
+endif
+
+magic_validator = shared_module('magic_validator',
+ magic_validator_sources,
+ kwargs: pg_test_mod_args,
+)
+test_install_libs += magic_validator
+
+oauth_hook_client_sources = files(
+ 'oauth_hook_client.c',
+)
+
+if host_system == 'windows'
+ oauth_hook_client_sources += rc_bin_gen.process(win32ver_rc, extra_args: [
+ '--NAME', 'oauth_hook_client',
+ '--FILEDESC', 'oauth_hook_client - test program for libpq OAuth hooks',])
+endif
+
+oauth_hook_client = executable('oauth_hook_client',
+ oauth_hook_client_sources,
+ dependencies: [frontend_code, libpq],
+ kwargs: default_bin_args + {
+ 'install': false,
+ },
+)
+testprep_targets += oauth_hook_client
+
+tests += {
+ 'name': 'oauth_validator',
+ 'sd': meson.current_source_dir(),
+ 'bd': meson.current_build_dir(),
+ 'tap': {
+ 'tests': [
+ 't/001_server.pl',
+ 't/002_client.pl',
+ ],
+ 'env': {
+ 'PYTHON': python.path(),
+ 'with_libcurl': libcurl.found() ? 'yes' : 'no',
+ 'with_python': 'yes',
+ },
+ },
+}
--- /dev/null
+/*-------------------------------------------------------------------------
+ *
+ * oauth_hook_client.c
+ * Test driver for t/002_client.pl, which verifies OAuth hook
+ * functionality in libpq.
+ *
+ * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/test/modules/oauth_validator/oauth_hook_client.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres_fe.h"
+
+#include <sys/socket.h>
+
+#include "getopt_long.h"
+#include "libpq-fe.h"
+
+static int handle_auth_data(PGauthData type, PGconn *conn, void *data);
+static PostgresPollingStatusType async_cb(PGconn *conn,
+ PGoauthBearerRequest *req,
+ pgsocket *altsock);
+static PostgresPollingStatusType misbehave_cb(PGconn *conn,
+ PGoauthBearerRequest *req,
+ pgsocket *altsock);
+
+static void
+usage(char *argv[])
+{
+ printf("usage: %s [flags] CONNINFO\n\n", argv[0]);
+
+ printf("recognized flags:\n");
+ printf(" -h, --help show this message\n");
+ printf(" --expected-scope SCOPE fail if received scopes do not match SCOPE\n");
+ printf(" --expected-uri URI fail if received configuration link does not match URI\n");
+ printf(" --misbehave=MODE have the hook fail required postconditions\n"
+ " (MODEs: no-hook, fail-async, no-token, no-socket)\n");
+ printf(" --no-hook don't install OAuth hooks\n");
+ printf(" --hang-forever don't ever return a token (combine with connect_timeout)\n");
+ printf(" --token TOKEN use the provided TOKEN value\n");
+ printf(" --stress-async busy-loop on PQconnectPoll rather than polling\n");
+}
+
+/* --options */
+static bool no_hook = false;
+static bool hang_forever = false;
+static bool stress_async = false;
+static const char *expected_uri = NULL;
+static const char *expected_scope = NULL;
+static const char *misbehave_mode = NULL;
+static char *token = NULL;
+
+int
+main(int argc, char *argv[])
+{
+ static const struct option long_options[] = {
+ {"help", no_argument, NULL, 'h'},
+
+ {"expected-scope", required_argument, NULL, 1000},
+ {"expected-uri", required_argument, NULL, 1001},
+ {"no-hook", no_argument, NULL, 1002},
+ {"token", required_argument, NULL, 1003},
+ {"hang-forever", no_argument, NULL, 1004},
+ {"misbehave", required_argument, NULL, 1005},
+ {"stress-async", no_argument, NULL, 1006},
+ {0}
+ };
+
+ const char *conninfo;
+ PGconn *conn;
+ int c;
+
+ while ((c = getopt_long(argc, argv, "h", long_options, NULL)) != -1)
+ {
+ switch (c)
+ {
+ case 'h':
+ usage(argv);
+ return 0;
+
+ case 1000: /* --expected-scope */
+ expected_scope = optarg;
+ break;
+
+ case 1001: /* --expected-uri */
+ expected_uri = optarg;
+ break;
+
+ case 1002: /* --no-hook */
+ no_hook = true;
+ break;
+
+ case 1003: /* --token */
+ token = optarg;
+ break;
+
+ case 1004: /* --hang-forever */
+ hang_forever = true;
+ break;
+
+ case 1005: /* --misbehave */
+ misbehave_mode = optarg;
+ break;
+
+ case 1006: /* --stress-async */
+ stress_async = true;
+ break;
+
+ default:
+ usage(argv);
+ return 1;
+ }
+ }
+
+ if (argc != optind + 1)
+ {
+ usage(argv);
+ return 1;
+ }
+
+ conninfo = argv[optind];
+
+ /* Set up our OAuth hooks. */
+ PQsetAuthDataHook(handle_auth_data);
+
+ /* Connect. (All the actual work is in the hook.) */
+ if (stress_async)
+ {
+ /*
+ * Perform an asynchronous connection, busy-looping on PQconnectPoll()
+ * without actually waiting on socket events. This stresses code paths
+ * that rely on asynchronous work to be done before continuing with
+ * the next step in the flow.
+ */
+ PostgresPollingStatusType res;
+
+ conn = PQconnectStart(conninfo);
+
+ do
+ {
+ res = PQconnectPoll(conn);
+ } while (res != PGRES_POLLING_FAILED && res != PGRES_POLLING_OK);
+ }
+ else
+ {
+ /* Perform a standard synchronous connection. */
+ conn = PQconnectdb(conninfo);
+ }
+
+ if (PQstatus(conn) != CONNECTION_OK)
+ {
+ fprintf(stderr, "connection to database failed: %s\n",
+ PQerrorMessage(conn));
+ PQfinish(conn);
+ return 1;
+ }
+
+ printf("connection succeeded\n");
+ PQfinish(conn);
+ return 0;
+}
+
+/*
+ * PQauthDataHook implementation. Replaces the default client flow by handling
+ * PQAUTHDATA_OAUTH_BEARER_TOKEN.
+ */
+static int
+handle_auth_data(PGauthData type, PGconn *conn, void *data)
+{
+ PGoauthBearerRequest *req = data;
+
+ if (no_hook || (type != PQAUTHDATA_OAUTH_BEARER_TOKEN))
+ return 0;
+
+ if (hang_forever)
+ {
+ /* Start asynchronous processing. */
+ req->async = async_cb;
+ return 1;
+ }
+
+ if (misbehave_mode)
+ {
+ if (strcmp(misbehave_mode, "no-hook") != 0)
+ req->async = misbehave_cb;
+ return 1;
+ }
+
+ if (expected_uri)
+ {
+ if (!req->openid_configuration)
+ {
+ fprintf(stderr, "expected URI \"%s\", got NULL\n", expected_uri);
+ return -1;
+ }
+
+ if (strcmp(expected_uri, req->openid_configuration) != 0)
+ {
+ fprintf(stderr, "expected URI \"%s\", got \"%s\"\n", expected_uri, req->openid_configuration);
+ return -1;
+ }
+ }
+
+ if (expected_scope)
+ {
+ if (!req->scope)
+ {
+ fprintf(stderr, "expected scope \"%s\", got NULL\n", expected_scope);
+ return -1;
+ }
+
+ if (strcmp(expected_scope, req->scope) != 0)
+ {
+ fprintf(stderr, "expected scope \"%s\", got \"%s\"\n", expected_scope, req->scope);
+ return -1;
+ }
+ }
+
+ req->token = token;
+ return 1;
+}
+
+static PostgresPollingStatusType
+async_cb(PGconn *conn, PGoauthBearerRequest *req, pgsocket *altsock)
+{
+ if (hang_forever)
+ {
+ /*
+ * This code tests that nothing is interfering with libpq's handling
+ * of connect_timeout.
+ */
+ static pgsocket sock = PGINVALID_SOCKET;
+
+ if (sock == PGINVALID_SOCKET)
+ {
+ /* First call. Create an unbound socket to wait on. */
+#ifdef WIN32
+ WSADATA wsaData;
+ int err;
+
+ err = WSAStartup(MAKEWORD(2, 2), &wsaData);
+ if (err)
+ {
+ perror("WSAStartup failed");
+ return PGRES_POLLING_FAILED;
+ }
+#endif
+ sock = socket(AF_INET, SOCK_DGRAM, 0);
+ if (sock == PGINVALID_SOCKET)
+ {
+ perror("failed to create datagram socket");
+ return PGRES_POLLING_FAILED;
+ }
+ }
+
+ /* Make libpq wait on the (unreadable) socket. */
+ *altsock = sock;
+ return PGRES_POLLING_READING;
+ }
+
+ req->token = token;
+ return PGRES_POLLING_OK;
+}
+
+static PostgresPollingStatusType
+misbehave_cb(PGconn *conn, PGoauthBearerRequest *req, pgsocket *altsock)
+{
+ if (strcmp(misbehave_mode, "fail-async") == 0)
+ {
+ /* Just fail "normally". */
+ return PGRES_POLLING_FAILED;
+ }
+ else if (strcmp(misbehave_mode, "no-token") == 0)
+ {
+ /* Callbacks must assign req->token before returning OK. */
+ return PGRES_POLLING_OK;
+ }
+ else if (strcmp(misbehave_mode, "no-socket") == 0)
+ {
+ /* Callbacks must assign *altsock before asking for polling. */
+ return PGRES_POLLING_READING;
+ }
+ else
+ {
+ fprintf(stderr, "unrecognized --misbehave mode: %s\n", misbehave_mode);
+ exit(1);
+ }
+}
--- /dev/null
+
+#
+# Tests the libpq builtin OAuth flow, as well as server-side HBA and validator
+# setup.
+#
+# Copyright (c) 2021-2025, PostgreSQL Global Development Group
+#
+
+use strict;
+use warnings FATAL => 'all';
+
+use JSON::PP qw(encode_json);
+use MIME::Base64 qw(encode_base64);
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+use FindBin;
+use lib $FindBin::RealBin;
+
+use OAuth::Server;
+
+if (!$ENV{PG_TEST_EXTRA} || $ENV{PG_TEST_EXTRA} !~ /\boauth\b/)
+{
+ plan skip_all =>
+ 'Potentially unsafe test oauth not enabled in PG_TEST_EXTRA';
+}
+
+if ($windows_os)
+{
+ plan skip_all => 'OAuth server-side tests are not supported on Windows';
+}
+
+if ($ENV{with_libcurl} ne 'yes')
+{
+ plan skip_all => 'client-side OAuth not supported by this build';
+}
+
+if ($ENV{with_python} ne 'yes')
+{
+ plan skip_all => 'OAuth tests require --with-python to run';
+}
+
+my $node = PostgreSQL::Test::Cluster->new('primary');
+$node->init;
+$node->append_conf('postgresql.conf', "log_connections = on\n");
+$node->append_conf('postgresql.conf',
+ "oauth_validator_libraries = 'validator'\n");
+$node->start;
+
+$node->safe_psql('postgres', 'CREATE USER test;');
+$node->safe_psql('postgres', 'CREATE USER testalt;');
+$node->safe_psql('postgres', 'CREATE USER testparam;');
+
+# Save a background connection for later configuration changes.
+my $bgconn = $node->background_psql('postgres');
+
+my $webserver = OAuth::Server->new();
+$webserver->run();
+
+END
+{
+ my $exit_code = $?;
+
+ $webserver->stop() if defined $webserver; # might have been SKIP'd
+
+ $? = $exit_code;
+}
+
+my $port = $webserver->port();
+my $issuer = "http://localhost:$port";
+
+unlink($node->data_dir . '/pg_hba.conf');
+$node->append_conf(
+ 'pg_hba.conf', qq{
+local all test oauth issuer="$issuer" scope="openid postgres"
+local all testalt oauth issuer="$issuer/.well-known/oauth-authorization-server/alternate" scope="openid postgres alt"
+local all testparam oauth issuer="$issuer/param" scope="openid postgres"
+});
+$node->reload;
+
+my $log_start = $node->wait_for_log(qr/reloading configuration files/);
+
+# Check pg_hba_file_rules() support.
+my $contents = $bgconn->query_safe(
+ qq(SELECT rule_number, auth_method, options
+ FROM pg_hba_file_rules
+ ORDER BY rule_number;));
+is( $contents,
+ qq{1|oauth|\{issuer=$issuer,"scope=openid postgres",validator=validator\}
+2|oauth|\{issuer=$issuer/.well-known/oauth-authorization-server/alternate,"scope=openid postgres alt",validator=validator\}
+3|oauth|\{issuer=$issuer/param,"scope=openid postgres",validator=validator\}},
+ "pg_hba_file_rules recreates OAuth HBA settings");
+
+# To test against HTTP rather than HTTPS, we need to enable PGOAUTHDEBUG. But
+# first, check to make sure the client refuses such connections by default.
+$node->connect_fails(
+ "user=test dbname=postgres oauth_issuer=$issuer oauth_client_id=f02c6361-0635",
+ "HTTPS is required without debug mode",
+ expected_stderr =>
+ qr@OAuth discovery URI "\Q$issuer\E/.well-known/openid-configuration" must use HTTPS@
+);
+
+$ENV{PGOAUTHDEBUG} = "UNSAFE";
+
+my $user = "test";
+$node->connect_ok(
+ "user=$user dbname=postgres oauth_issuer=$issuer oauth_client_id=f02c6361-0635",
+ "connect as test",
+ expected_stderr =>
+ qr@Visit https://example\.com/ and enter the code: postgresuser@,
+ log_like => [
+ qr/oauth_validator: token="9243959234", role="$user"/,
+ qr/oauth_validator: issuer="\Q$issuer\E", scope="openid postgres"/,
+ qr/connection authenticated: identity="test" method=oauth/,
+ qr/connection authorized/,
+ ]);
+
+# The /alternate issuer uses slightly different parameters, along with an
+# OAuth-style discovery document.
+$user = "testalt";
+$node->connect_ok(
+ "user=$user dbname=postgres oauth_issuer=$issuer/alternate oauth_client_id=f02c6361-0636",
+ "connect as testalt",
+ expected_stderr =>
+ qr@Visit https://example\.org/ and enter the code: postgresuser@,
+ log_like => [
+ qr/oauth_validator: token="9243959234-alt", role="$user"/,
+ qr|oauth_validator: issuer="\Q$issuer/.well-known/oauth-authorization-server/alternate\E", scope="openid postgres alt"|,
+ qr/connection authenticated: identity="testalt" method=oauth/,
+ qr/connection authorized/,
+ ]);
+
+# The issuer linked by the server must match the client's oauth_issuer setting.
+$node->connect_fails(
+ "user=$user dbname=postgres oauth_issuer=$issuer oauth_client_id=f02c6361-0636",
+ "oauth_issuer must match discovery",
+ expected_stderr =>
+ qr@server's discovery document at \Q$issuer/.well-known/oauth-authorization-server/alternate\E \(issuer "\Q$issuer/alternate\E"\) is incompatible with oauth_issuer \(\Q$issuer\E\)@
+);
+
+# Test require_auth settings against OAUTHBEARER.
+my @cases = (
+ { require_auth => "oauth" },
+ { require_auth => "oauth,scram-sha-256" },
+ { require_auth => "password,oauth" },
+ { require_auth => "none,oauth" },
+ { require_auth => "!scram-sha-256" },
+ { require_auth => "!none" },
+
+ {
+ require_auth => "!oauth",
+ failure => qr/server requested OAUTHBEARER authentication/
+ },
+ {
+ require_auth => "scram-sha-256",
+ failure => qr/server requested OAUTHBEARER authentication/
+ },
+ {
+ require_auth => "!password,!oauth",
+ failure => qr/server requested OAUTHBEARER authentication/
+ },
+ {
+ require_auth => "none",
+ failure => qr/server requested SASL authentication/
+ },
+ {
+ require_auth => "!oauth,!scram-sha-256",
+ failure => qr/server requested SASL authentication/
+ });
+
+$user = "test";
+foreach my $c (@cases)
+{
+ my $connstr =
+ "user=$user dbname=postgres oauth_issuer=$issuer oauth_client_id=f02c6361-0635 require_auth=$c->{'require_auth'}";
+
+ if (defined $c->{'failure'})
+ {
+ $node->connect_fails(
+ $connstr,
+ "require_auth=$c->{'require_auth'} fails",
+ expected_stderr => $c->{'failure'});
+ }
+ else
+ {
+ $node->connect_ok(
+ $connstr,
+ "require_auth=$c->{'require_auth'} succeeds",
+ expected_stderr =>
+ qr@Visit https://example\.com/ and enter the code: postgresuser@
+ );
+ }
+}
+
+# Make sure the client_id and secret are correctly encoded. $vschars contains
+# every allowed character for a client_id/_secret (the "VSCHAR" class).
+# $vschars_esc is additionally backslash-escaped for inclusion in a
+# single-quoted connection string.
+my $vschars =
+ " !\"#\$%&'()*+,-./0123456789:;<=>?\@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~";
+my $vschars_esc =
+ " !\"#\$%&\\'()*+,-./0123456789:;<=>?\@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~";
+
+$node->connect_ok(
+ "user=$user dbname=postgres oauth_issuer=$issuer oauth_client_id='$vschars_esc'",
+ "escapable characters: client_id",
+ expected_stderr =>
+ qr@Visit https://example\.com/ and enter the code: postgresuser@);
+$node->connect_ok(
+ "user=$user dbname=postgres oauth_issuer=$issuer oauth_client_id='$vschars_esc' oauth_client_secret='$vschars_esc'",
+ "escapable characters: client_id and secret",
+ expected_stderr =>
+ qr@Visit https://example\.com/ and enter the code: postgresuser@);
+
+#
+# Further tests rely on support for specific behaviors in oauth_server.py. To
+# trigger these behaviors, we ask for the special issuer .../param (which is set
+# up in HBA for the testparam user) and encode magic instructions into the
+# oauth_client_id.
+#
+
+my $common_connstr =
+ "user=testparam dbname=postgres oauth_issuer=$issuer/param ";
+my $base_connstr = $common_connstr;
+
+sub connstr
+{
+ my (%params) = @_;
+
+ my $json = encode_json(\%params);
+ my $encoded = encode_base64($json, "");
+
+ return "$base_connstr oauth_client_id=$encoded";
+}
+
+# Make sure the param system works end-to-end first.
+$node->connect_ok(
+ connstr(),
+ "connect to /param",
+ expected_stderr =>
+ qr@Visit https://example\.com/ and enter the code: postgresuser@);
+
+$node->connect_ok(
+ connstr(stage => 'token', retries => 1),
+ "token retry",
+ expected_stderr =>
+ qr@Visit https://example\.com/ and enter the code: postgresuser@);
+$node->connect_ok(
+ connstr(stage => 'token', retries => 2),
+ "token retry (twice)",
+ expected_stderr =>
+ qr@Visit https://example\.com/ and enter the code: postgresuser@);
+$node->connect_ok(
+ connstr(stage => 'all', retries => 1, interval => 2),
+ "token retry (two second interval)",
+ expected_stderr =>
+ qr@Visit https://example\.com/ and enter the code: postgresuser@);
+$node->connect_ok(
+ connstr(stage => 'all', retries => 1, interval => JSON::PP::null),
+ "token retry (default interval)",
+ expected_stderr =>
+ qr@Visit https://example\.com/ and enter the code: postgresuser@);
+
+$node->connect_ok(
+ connstr(stage => 'all', content_type => 'application/json;charset=utf-8'),
+ "content type with charset",
+ expected_stderr =>
+ qr@Visit https://example\.com/ and enter the code: postgresuser@);
+$node->connect_ok(
+ connstr(
+ stage => 'all',
+ content_type => "application/json \t;\t charset=utf-8"),
+ "content type with charset (whitespace)",
+ expected_stderr =>
+ qr@Visit https://example\.com/ and enter the code: postgresuser@);
+$node->connect_ok(
+ connstr(stage => 'device', uri_spelling => "verification_url"),
+ "alternative spelling of verification_uri",
+ expected_stderr =>
+ qr@Visit https://example\.com/ and enter the code: postgresuser@);
+
+$node->connect_fails(
+ connstr(stage => 'device', huge_response => JSON::PP::true),
+ "bad device authz response: overlarge JSON",
+ expected_stderr =>
+ qr/failed to obtain device authorization: response is too large/);
+$node->connect_fails(
+ connstr(stage => 'token', huge_response => JSON::PP::true),
+ "bad token response: overlarge JSON",
+ expected_stderr =>
+ qr/failed to obtain access token: response is too large/);
+
+$node->connect_fails(
+ connstr(stage => 'device', content_type => 'text/plain'),
+ "bad device authz response: wrong content type",
+ expected_stderr =>
+ qr/failed to parse device authorization: unexpected content type/);
+$node->connect_fails(
+ connstr(stage => 'token', content_type => 'text/plain'),
+ "bad token response: wrong content type",
+ expected_stderr =>
+ qr/failed to parse access token response: unexpected content type/);
+$node->connect_fails(
+ connstr(stage => 'token', content_type => 'application/jsonx'),
+ "bad token response: wrong content type (correct prefix)",
+ expected_stderr =>
+ qr/failed to parse access token response: unexpected content type/);
+
+$node->connect_fails(
+ connstr(
+ stage => 'all',
+ interval => ~0,
+ retries => 1,
+ retry_code => "slow_down"),
+ "bad token response: server overflows the device authz interval",
+ expected_stderr =>
+ qr/failed to obtain access token: slow_down interval overflow/);
+
+$node->connect_fails(
+ connstr(stage => 'token', error_code => "invalid_grant"),
+ "bad token response: invalid_grant, no description",
+ expected_stderr => qr/failed to obtain access token: \(invalid_grant\)/);
+$node->connect_fails(
+ connstr(
+ stage => 'token',
+ error_code => "invalid_grant",
+ error_desc => "grant expired"),
+ "bad token response: expired grant",
+ expected_stderr =>
+ qr/failed to obtain access token: grant expired \(invalid_grant\)/);
+$node->connect_fails(
+ connstr(
+ stage => 'token',
+ error_code => "invalid_client",
+ error_status => 401),
+ "bad token response: client authentication failure, default description",
+ expected_stderr =>
+ qr/failed to obtain access token: provider requires client authentication, and no oauth_client_secret is set \(invalid_client\)/
+);
+$node->connect_fails(
+ connstr(
+ stage => 'token',
+ error_code => "invalid_client",
+ error_status => 401,
+ error_desc => "authn failure"),
+ "bad token response: client authentication failure, provided description",
+ expected_stderr =>
+ qr/failed to obtain access token: authn failure \(invalid_client\)/);
+
+$node->connect_fails(
+ connstr(stage => 'token', token => ""),
+ "server rejects access: empty token",
+ expected_stderr => qr/bearer authentication failed/);
+$node->connect_fails(
+ connstr(stage => 'token', token => "****"),
+ "server rejects access: invalid token contents",
+ expected_stderr => qr/bearer authentication failed/);
+
+# Test behavior of the oauth_client_secret.
+$base_connstr = "$common_connstr oauth_client_secret=''";
+
+$node->connect_ok(
+ connstr(stage => 'all', expected_secret => ''),
+ "empty oauth_client_secret",
+ expected_stderr =>
+ qr@Visit https://example\.com/ and enter the code: postgresuser@);
+
+$base_connstr = "$common_connstr oauth_client_secret='$vschars_esc'";
+
+$node->connect_ok(
+ connstr(stage => 'all', expected_secret => $vschars),
+ "nonempty oauth_client_secret",
+ expected_stderr =>
+ qr@Visit https://example\.com/ and enter the code: postgresuser@);
+
+$node->connect_fails(
+ connstr(
+ stage => 'token',
+ error_code => "invalid_client",
+ error_status => 401),
+ "bad token response: client authentication failure, default description with oauth_client_secret",
+ expected_stderr =>
+ qr/failed to obtain access token: provider rejected the oauth_client_secret \(invalid_client\)/
+);
+$node->connect_fails(
+ connstr(
+ stage => 'token',
+ error_code => "invalid_client",
+ error_status => 401,
+ error_desc => "mutual TLS required for client"),
+ "bad token response: client authentication failure, provided description with oauth_client_secret",
+ expected_stderr =>
+ qr/failed to obtain access token: mutual TLS required for client \(invalid_client\)/
+);
+
+# Stress test: make sure our builtin flow operates correctly even if the client
+# application isn't respecting PGRES_POLLING_READING/WRITING signals returned
+# from PQconnectPoll().
+$base_connstr =
+ "$common_connstr port=" . $node->port . " host=" . $node->host;
+my @cmd = (
+ "oauth_hook_client", "--no-hook", "--stress-async",
+ connstr(stage => 'all', retries => 1, interval => 1));
+
+note "running '" . join("' '", @cmd) . "'";
+my ($stdout, $stderr) = run_command(\@cmd);
+
+like($stdout, qr/connection succeeded/, "stress-async: stdout matches");
+unlike(
+ $stderr,
+ qr/connection to database failed/,
+ "stress-async: stderr matches");
+
+#
+# This section of tests reconfigures the validator module itself, rather than
+# the OAuth server.
+#
+
+# Searching the logs is easier if OAuth parameter discovery isn't cluttering
+# things up; hardcode the discovery URI. (Scope is hardcoded to empty to cover
+# that case as well.)
+$common_connstr =
+ "dbname=postgres oauth_issuer=$issuer/.well-known/openid-configuration oauth_scope='' oauth_client_id=f02c6361-0635";
+
+# Misbehaving validators must fail shut.
+$bgconn->query_safe("ALTER SYSTEM SET oauth_validator.authn_id TO ''");
+$node->reload;
+$log_start =
+ $node->wait_for_log(qr/reloading configuration files/, $log_start);
+
+$node->connect_fails(
+ "$common_connstr user=test",
+ "validator must set authn_id",
+ expected_stderr => qr/OAuth bearer authentication failed/,
+ log_like => [
+ qr/connection authenticated: identity=""/,
+ qr/DETAIL:\s+Validator provided no identity/,
+ qr/FATAL:\s+OAuth bearer authentication failed/,
+ ]);
+
+# Even if a validator authenticates the user, if the token isn't considered
+# valid, the connection fails.
+$bgconn->query_safe(
+ "ALTER SYSTEM SET oauth_validator.authn_id TO 'test\@example.org'");
+$bgconn->query_safe(
+ "ALTER SYSTEM SET oauth_validator.authorize_tokens TO false");
+$node->reload;
+$log_start =
+ $node->wait_for_log(qr/reloading configuration files/, $log_start);
+
+$node->connect_fails(
+ "$common_connstr user=test",
+ "validator must authorize token explicitly",
+ expected_stderr => qr/OAuth bearer authentication failed/,
+ log_like => [
+ qr/connection authenticated: identity="test\@example\.org"/,
+ qr/DETAIL:\s+Validator failed to authorize the provided token/,
+ qr/FATAL:\s+OAuth bearer authentication failed/,
+ ]);
+
+#
+# Test user mapping.
+#
+
+# Allow "user@example.com" to log in under the test role.
+unlink($node->data_dir . '/pg_ident.conf');
+$node->append_conf(
+ 'pg_ident.conf', qq{
+oauthmap user\@example.com test
+});
+
+# test and testalt use the map; testparam uses ident delegation.
+unlink($node->data_dir . '/pg_hba.conf');
+$node->append_conf(
+ 'pg_hba.conf', qq{
+local all test oauth issuer="$issuer" scope="" map=oauthmap
+local all testalt oauth issuer="$issuer" scope="" map=oauthmap
+local all testparam oauth issuer="$issuer" scope="" delegate_ident_mapping=1
+});
+
+# To start, have the validator use the role names as authn IDs.
+$bgconn->query_safe("ALTER SYSTEM RESET oauth_validator.authn_id");
+$bgconn->query_safe("ALTER SYSTEM RESET oauth_validator.authorize_tokens");
+
+$node->reload;
+$log_start =
+ $node->wait_for_log(qr/reloading configuration files/, $log_start);
+
+# The test and testalt roles should no longer map correctly.
+$node->connect_fails(
+ "$common_connstr user=test",
+ "mismatched username map (test)",
+ expected_stderr => qr/OAuth bearer authentication failed/);
+$node->connect_fails(
+ "$common_connstr user=testalt",
+ "mismatched username map (testalt)",
+ expected_stderr => qr/OAuth bearer authentication failed/);
+
+# Have the validator identify the end user as user@example.com.
+$bgconn->query_safe(
+ "ALTER SYSTEM SET oauth_validator.authn_id TO 'user\@example.com'");
+$node->reload;
+$log_start =
+ $node->wait_for_log(qr/reloading configuration files/, $log_start);
+
+# Now the test role can be logged into. (testalt still can't be mapped.)
+$node->connect_ok(
+ "$common_connstr user=test",
+ "matched username map (test)",
+ expected_stderr =>
+ qr@Visit https://example\.com/ and enter the code: postgresuser@);
+$node->connect_fails(
+ "$common_connstr user=testalt",
+ "mismatched username map (testalt)",
+ expected_stderr => qr/OAuth bearer authentication failed/);
+
+# testparam ignores the map entirely.
+$node->connect_ok(
+ "$common_connstr user=testparam",
+ "delegated ident (testparam)",
+ expected_stderr =>
+ qr@Visit https://example\.com/ and enter the code: postgresuser@);
+
+$bgconn->query_safe("ALTER SYSTEM RESET oauth_validator.authn_id");
+$node->reload;
+$log_start =
+ $node->wait_for_log(qr/reloading configuration files/, $log_start);
+
+#
+# Test multiple validators.
+#
+
+$node->append_conf('postgresql.conf',
+ "oauth_validator_libraries = 'validator, fail_validator'\n");
+
+# With multiple validators, every HBA line must explicitly declare one.
+my $result = $node->restart(fail_ok => 1);
+is($result, 0,
+ 'restart fails without explicit validators in oauth HBA entries');
+
+$log_start = $node->wait_for_log(
+ qr/authentication method "oauth" requires argument "validator" to be set/,
+ $log_start);
+
+unlink($node->data_dir . '/pg_hba.conf');
+$node->append_conf(
+ 'pg_hba.conf', qq{
+local all test oauth validator=validator issuer="$issuer" scope="openid postgres"
+local all testalt oauth validator=fail_validator issuer="$issuer/.well-known/oauth-authorization-server/alternate" scope="openid postgres alt"
+});
+$node->restart;
+
+$log_start = $node->wait_for_log(qr/ready to accept connections/, $log_start);
+
+# The test user should work as before.
+$user = "test";
+$node->connect_ok(
+ "user=$user dbname=postgres oauth_issuer=$issuer oauth_client_id=f02c6361-0635",
+ "validator is used for $user",
+ expected_stderr =>
+ qr@Visit https://example\.com/ and enter the code: postgresuser@,
+ log_like => [qr/connection authorized/]);
+
+# testalt should be routed through the fail_validator.
+$user = "testalt";
+$node->connect_fails(
+ "user=$user dbname=postgres oauth_issuer=$issuer/.well-known/oauth-authorization-server/alternate oauth_client_id=f02c6361-0636",
+ "fail_validator is used for $user",
+ expected_stderr => qr/FATAL:\s+fail_validator: sentinel error/);
+
+#
+# Test ABI compatibility magic marker
+#
+$node->append_conf('postgresql.conf',
+ "oauth_validator_libraries = 'magic_validator'\n");
+unlink($node->data_dir . '/pg_hba.conf');
+$node->append_conf(
+ 'pg_hba.conf', qq{
+local all test oauth validator=magic_validator issuer="$issuer" scope="openid postgres"
+});
+$node->restart;
+
+$log_start = $node->wait_for_log(qr/ready to accept connections/, $log_start);
+
+$node->connect_fails(
+ "user=test dbname=postgres oauth_issuer=$issuer/.well-known/oauth-authorization-server/alternate oauth_client_id=f02c6361-0636",
+ "magic_validator is used for $user",
+ expected_stderr =>
+ qr/FATAL:\s+OAuth validator module "magic_validator": magic number mismatch/
+);
+$node->stop;
+
+done_testing();
--- /dev/null
+#
+# Exercises the API for custom OAuth client flows, using the oauth_hook_client
+# test driver.
+#
+# Copyright (c) 2021-2025, PostgreSQL Global Development Group
+#
+
+use strict;
+use warnings FATAL => 'all';
+
+use JSON::PP qw(encode_json);
+use MIME::Base64 qw(encode_base64);
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+if (!$ENV{PG_TEST_EXTRA} || $ENV{PG_TEST_EXTRA} !~ /\boauth\b/)
+{
+ plan skip_all =>
+ 'Potentially unsafe test oauth not enabled in PG_TEST_EXTRA';
+}
+
+#
+# Cluster Setup
+#
+
+my $node = PostgreSQL::Test::Cluster->new('primary');
+$node->init;
+$node->append_conf('postgresql.conf', "log_connections = on\n");
+$node->append_conf('postgresql.conf',
+ "oauth_validator_libraries = 'validator'\n");
+$node->start;
+
+$node->safe_psql('postgres', 'CREATE USER test;');
+
+# These tests don't use the builtin flow, and we don't have an authorization
+# server running, so the address used here shouldn't matter. Use an invalid IP
+# address, so if there's some cascade of errors that causes the client to
+# attempt a connection, we'll fail noisily.
+my $issuer = "https://256.256.256.256";
+my $scope = "openid postgres";
+
+unlink($node->data_dir . '/pg_hba.conf');
+$node->append_conf(
+ 'pg_hba.conf', qq{
+local all test oauth issuer="$issuer" scope="$scope"
+});
+$node->reload;
+
+my ($log_start, $log_end);
+$log_start = $node->wait_for_log(qr/reloading configuration files/);
+
+$ENV{PGOAUTHDEBUG} = "UNSAFE";
+
+#
+# Tests
+#
+
+my $user = "test";
+my $base_connstr = $node->connstr() . " user=$user";
+my $common_connstr =
+ "$base_connstr oauth_issuer=$issuer oauth_client_id=myID";
+
+sub test
+{
+ my ($test_name, %params) = @_;
+
+ my $flags = [];
+ if (defined($params{flags}))
+ {
+ $flags = $params{flags};
+ }
+
+ my @cmd = ("oauth_hook_client", @{$flags}, $common_connstr);
+ note "running '" . join("' '", @cmd) . "'";
+
+ my ($stdout, $stderr) = run_command(\@cmd);
+
+ if (defined($params{expected_stdout}))
+ {
+ like($stdout, $params{expected_stdout}, "$test_name: stdout matches");
+ }
+
+ if (defined($params{expected_stderr}))
+ {
+ like($stderr, $params{expected_stderr}, "$test_name: stderr matches");
+ }
+ else
+ {
+ is($stderr, "", "$test_name: no stderr");
+ }
+}
+
+test(
+ "basic synchronous hook can provide a token",
+ flags => [
+ "--token", "my-token",
+ "--expected-uri", "$issuer/.well-known/openid-configuration",
+ "--expected-scope", $scope,
+ ],
+ expected_stdout => qr/connection succeeded/);
+
+$node->log_check("validator receives correct token",
+ $log_start,
+ log_like => [ qr/oauth_validator: token="my-token", role="$user"/, ]);
+
+if ($ENV{with_libcurl} ne 'yes')
+{
+ # libpq should help users out if no OAuth support is built in.
+ test(
+ "fails without custom hook installed",
+ flags => ["--no-hook"],
+ expected_stderr =>
+ qr/no custom OAuth flows are available, and libpq was not built with libcurl support/
+ );
+}
+
+# connect_timeout should work if the flow doesn't respond.
+$common_connstr = "$common_connstr connect_timeout=1";
+test(
+ "connect_timeout interrupts hung client flow",
+ flags => ["--hang-forever"],
+ expected_stderr => qr/failed: timeout expired/);
+
+# Test various misbehaviors of the client hook.
+my @cases = (
+ {
+ flag => "--misbehave=no-hook",
+ expected_error =>
+ qr/user-defined OAuth flow provided neither a token nor an async callback/,
+ },
+ {
+ flag => "--misbehave=fail-async",
+ expected_error => qr/user-defined OAuth flow failed/,
+ },
+ {
+ flag => "--misbehave=no-token",
+ expected_error => qr/user-defined OAuth flow did not provide a token/,
+ },
+ {
+ flag => "--misbehave=no-socket",
+ expected_error =>
+ qr/user-defined OAuth flow did not provide a socket for polling/,
+ });
+
+foreach my $c (@cases)
+{
+ test(
+ "hook misbehavior: $c->{'flag'}",
+ flags => [ $c->{'flag'} ],
+ expected_stderr => $c->{'expected_error'});
+}
+
+done_testing();
--- /dev/null
+
+# Copyright (c) 2025, PostgreSQL Global Development Group
+
+=pod
+
+=head1 NAME
+
+OAuth::Server - runs a mock OAuth authorization server for testing
+
+=head1 SYNOPSIS
+
+ use OAuth::Server;
+
+ my $server = OAuth::Server->new();
+ $server->run;
+
+ my $port = $server->port;
+ my $issuer = "http://localhost:$port";
+
+ # test against $issuer...
+
+ $server->stop;
+
+=head1 DESCRIPTION
+
+This is the glue API between the Perl tests and the Python authorization
+server daemon implemented in t/oauth_server.py. (Python has a fairly usable
+HTTP server in its standard library, so the implementation was ported from
+Perl to Python.)
+
+This authorization server does not use TLS (it implements a nonstandard, unsafe
+issuer at "http://localhost:<port>"), so libpq in particular will need to set
+PGOAUTHDEBUG=UNSAFE to be able to talk to it.
+
+=cut
+
+package OAuth::Server;
+
+use warnings;
+use strict;
+use Scalar::Util;
+use Test::More;
+
+=pod
+
+=head1 METHODS
+
+=over
+
+=item OAuth::Server->new()
+
+Create a new OAuth Server object.
+
+=cut
+
+sub new
+{
+ my $class = shift;
+
+ my $self = {};
+ bless($self, $class);
+
+ return $self;
+}
+
+=pod
+
+=item $server->port()
+
+Returns the port in use by the server.
+
+=cut
+
+sub port
+{
+ my $self = shift;
+
+ return $self->{'port'};
+}
+
+=pod
+
+=item $server->run()
+
+Runs the authorization server daemon in t/oauth_server.py.
+
+=cut
+
+sub run
+{
+ my $self = shift;
+ my $port;
+
+ my $pid = open(my $read_fh, "-|", $ENV{PYTHON}, "t/oauth_server.py")
+ or die "failed to start OAuth server: $!";
+
+ # Get the port number from the daemon. It closes stdout afterwards; that way
+ # we can slurp in the entire contents here rather than worrying about the
+ # number of bytes to read.
+ $port = do { local $/ = undef; <$read_fh> }
+ // die "failed to read port number: $!";
+ chomp $port;
+ die "server did not advertise a valid port"
+ unless Scalar::Util::looks_like_number($port);
+
+ $self->{'pid'} = $pid;
+ $self->{'port'} = $port;
+ $self->{'child'} = $read_fh;
+
+ note("OAuth provider (PID $pid) is listening on port $port\n");
+}
+
+=pod
+
+=item $server->stop()
+
+Sends SIGTERM to the authorization server and waits for it to exit.
+
+=cut
+
+sub stop
+{
+ my $self = shift;
+
+ note("Sending SIGTERM to OAuth provider PID: $self->{'pid'}\n");
+
+ kill(15, $self->{'pid'});
+ $self->{'pid'} = undef;
+
+ # Closing the popen() handle waits for the process to exit.
+ close($self->{'child'});
+ $self->{'child'} = undef;
+}
+
+=pod
+
+=back
+
+=cut
+
+1;
--- /dev/null
+#! /usr/bin/env python3
+#
+# A mock OAuth authorization server, designed to be invoked from
+# OAuth/Server.pm. This listens on an ephemeral port number (printed to stdout
+# so that the Perl tests can contact it) and runs as a daemon until it is
+# signaled.
+#
+
+import base64
+import http.server
+import json
+import os
+import sys
+import time
+import urllib.parse
+from collections import defaultdict
+
+
+class OAuthHandler(http.server.BaseHTTPRequestHandler):
+ """
+ Core implementation of the authorization server. The API is
+ inheritance-based, with entry points at do_GET() and do_POST(). See the
+ documentation for BaseHTTPRequestHandler.
+ """
+
+ JsonObject = dict[str, object] # TypeAlias is not available until 3.10
+
+ def _check_issuer(self):
+ """
+ Switches the behavior of the provider depending on the issuer URI.
+ """
+ self._alt_issuer = (
+ self.path.startswith("/alternate/")
+ or self.path == "/.well-known/oauth-authorization-server/alternate"
+ )
+ self._parameterized = self.path.startswith("/param/")
+
+ if self._alt_issuer:
+ # The /alternate issuer uses IETF-style .well-known URIs.
+ if self.path.startswith("/.well-known/"):
+ self.path = self.path.removesuffix("/alternate")
+ else:
+ self.path = self.path.removeprefix("/alternate")
+ elif self._parameterized:
+ self.path = self.path.removeprefix("/param")
+
+ def _check_authn(self):
+ """
+ Checks the expected value of the Authorization header, if any.
+ """
+ secret = self._get_param("expected_secret", None)
+ if secret is None:
+ return
+
+ assert "Authorization" in self.headers
+ method, creds = self.headers["Authorization"].split()
+
+ if method != "Basic":
+ raise RuntimeError(f"client used {method} auth; expected Basic")
+
+ username = urllib.parse.quote_plus(self.client_id)
+ password = urllib.parse.quote_plus(secret)
+ expected_creds = f"{username}:{password}"
+
+ if creds.encode() != base64.b64encode(expected_creds.encode()):
+ raise RuntimeError(
+ f"client sent '{creds}'; expected b64encode('{expected_creds}')"
+ )
+
+ def do_GET(self):
+ self._response_code = 200
+ self._check_issuer()
+
+ config_path = "/.well-known/openid-configuration"
+ if self._alt_issuer:
+ config_path = "/.well-known/oauth-authorization-server"
+
+ if self.path == config_path:
+ resp = self.config()
+ else:
+ self.send_error(404, "Not Found")
+ return
+
+ self._send_json(resp)
+
+ def _parse_params(self) -> dict[str, list[str]]:
+ """
+ Parses apart the form-urlencoded request body and returns the resulting
+ dict. For use by do_POST().
+ """
+ size = int(self.headers["Content-Length"])
+ form = self.rfile.read(size)
+
+ assert self.headers["Content-Type"] == "application/x-www-form-urlencoded"
+ return urllib.parse.parse_qs(
+ form.decode("utf-8"),
+ strict_parsing=True,
+ keep_blank_values=True,
+ encoding="utf-8",
+ errors="strict",
+ )
+
+ @property
+ def client_id(self) -> str:
+ """
+ Returns the client_id sent in the POST body or the Authorization header.
+ self._parse_params() must have been called first.
+ """
+ if "client_id" in self._params:
+ return self._params["client_id"][0]
+
+ if "Authorization" not in self.headers:
+ raise RuntimeError("client did not send any client_id")
+
+ _, creds = self.headers["Authorization"].split()
+
+ decoded = base64.b64decode(creds).decode("utf-8")
+ username, _ = decoded.split(":", 1)
+
+ return urllib.parse.unquote_plus(username)
+
+ def do_POST(self):
+ self._response_code = 200
+ self._check_issuer()
+
+ self._params = self._parse_params()
+ if self._parameterized:
+ # Pull encoded test parameters out of the peer's client_id field.
+ # This is expected to be Base64-encoded JSON.
+ js = base64.b64decode(self.client_id)
+ self._test_params = json.loads(js)
+
+ self._check_authn()
+
+ if self.path == "/authorize":
+ resp = self.authorization()
+ elif self.path == "/token":
+ resp = self.token()
+ else:
+ self.send_error(404)
+ return
+
+ self._send_json(resp)
+
+ def _should_modify(self) -> bool:
+ """
+ Returns True if the client has requested a modification to this stage of
+ the exchange.
+ """
+ if not hasattr(self, "_test_params"):
+ return False
+
+ stage = self._test_params.get("stage")
+
+ return (
+ stage == "all"
+ or (
+ stage == "discovery"
+ and self.path == "/.well-known/openid-configuration"
+ )
+ or (stage == "device" and self.path == "/authorize")
+ or (stage == "token" and self.path == "/token")
+ )
+
+ def _get_param(self, name, default):
+ """
+ If the client has requested a modification to this stage (see
+ _should_modify()), this method searches the provided test parameters for
+ a key of the given name, and returns it if found. Otherwise the provided
+ default is returned.
+ """
+ if self._should_modify() and name in self._test_params:
+ return self._test_params[name]
+
+ return default
+
+ @property
+ def _content_type(self) -> str:
+ """
+ Returns "application/json" unless the test has requested something
+ different.
+ """
+ return self._get_param("content_type", "application/json")
+
+ @property
+ def _interval(self) -> int:
+ """
+ Returns 0 unless the test has requested something different.
+ """
+ return self._get_param("interval", 0)
+
+ @property
+ def _retry_code(self) -> str:
+ """
+ Returns "authorization_pending" unless the test has requested something
+ different.
+ """
+ return self._get_param("retry_code", "authorization_pending")
+
+ @property
+ def _uri_spelling(self) -> str:
+ """
+ Returns "verification_uri" unless the test has requested something
+ different.
+ """
+ return self._get_param("uri_spelling", "verification_uri")
+
+ @property
+ def _response_padding(self):
+ """
+ If the huge_response test parameter is set to True, returns a dict
+ containing a gigantic string value, which can then be folded into a JSON
+ response.
+ """
+ if not self._get_param("huge_response", False):
+ return dict()
+
+ return {"_pad_": "x" * 1024 * 1024}
+
+ @property
+ def _access_token(self):
+ """
+ The actual Bearer token sent back to the client on success. Tests may
+ override this with the "token" test parameter.
+ """
+ token = self._get_param("token", None)
+ if token is not None:
+ return token
+
+ token = "9243959234"
+ if self._alt_issuer:
+ token += "-alt"
+
+ return token
+
+ def _send_json(self, js: JsonObject) -> None:
+ """
+ Sends the provided JSON dict as an application/json response.
+ self._response_code can be modified to send JSON error responses.
+ """
+ resp = json.dumps(js).encode("ascii")
+ self.log_message("sending JSON response: %s", resp)
+
+ self.send_response(self._response_code)
+ self.send_header("Content-Type", self._content_type)
+ self.send_header("Content-Length", str(len(resp)))
+ self.end_headers()
+
+ self.wfile.write(resp)
+
+ def config(self) -> JsonObject:
+ port = self.server.socket.getsockname()[1]
+
+ issuer = f"http://localhost:{port}"
+ if self._alt_issuer:
+ issuer += "/alternate"
+ elif self._parameterized:
+ issuer += "/param"
+
+ return {
+ "issuer": issuer,
+ "token_endpoint": issuer + "/token",
+ "device_authorization_endpoint": issuer + "/authorize",
+ "response_types_supported": ["token"],
+ "subject_types_supported": ["public"],
+ "id_token_signing_alg_values_supported": ["RS256"],
+ "grant_types_supported": [
+ "authorization_code",
+ "urn:ietf:params:oauth:grant-type:device_code",
+ ],
+ }
+
+ @property
+ def _token_state(self):
+ """
+ A cached _TokenState object for the connected client (as determined by
+ the request's client_id), or a new one if it doesn't already exist.
+
+ This relies on the existence of a defaultdict attached to the server;
+ see main() below.
+ """
+ return self.server.token_state[self.client_id]
+
+ def _remove_token_state(self):
+ """
+ Removes any cached _TokenState for the current client_id. Call this
+ after the token exchange ends to get rid of unnecessary state.
+ """
+ if self.client_id in self.server.token_state:
+ del self.server.token_state[self.client_id]
+
+ def authorization(self) -> JsonObject:
+ uri = "https://example.com/"
+ if self._alt_issuer:
+ uri = "https://example.org/"
+
+ resp = {
+ "device_code": "postgres",
+ "user_code": "postgresuser",
+ self._uri_spelling: uri,
+ "expires_in": 5,
+ **self._response_padding,
+ }
+
+ interval = self._interval
+ if interval is not None:
+ resp["interval"] = interval
+ self._token_state.min_delay = interval
+ else:
+ self._token_state.min_delay = 5 # default
+
+ # Check the scope.
+ if "scope" in self._params:
+ assert self._params["scope"][0], "empty scopes should be omitted"
+
+ return resp
+
+ def token(self) -> JsonObject:
+ if err := self._get_param("error_code", None):
+ self._response_code = self._get_param("error_status", 400)
+
+ resp = {"error": err}
+ if desc := self._get_param("error_desc", ""):
+ resp["error_description"] = desc
+
+ return resp
+
+ if self._should_modify() and "retries" in self._test_params:
+ retries = self._test_params["retries"]
+
+ # Check to make sure the token interval is being respected.
+ now = time.monotonic()
+ if self._token_state.last_try is not None:
+ delay = now - self._token_state.last_try
+ assert (
+ delay > self._token_state.min_delay
+ ), f"client waited only {delay} seconds between token requests (expected {self._token_state.min_delay})"
+
+ self._token_state.last_try = now
+
+ # If we haven't reached the required number of retries yet, return a
+ # "pending" response.
+ if self._token_state.retries < retries:
+ self._token_state.retries += 1
+
+ self._response_code = 400
+ return {"error": self._retry_code}
+
+ # Clean up any retry tracking state now that the exchange is ending.
+ self._remove_token_state()
+
+ return {
+ "access_token": self._access_token,
+ "token_type": "bearer",
+ **self._response_padding,
+ }
+
+
+def main():
+ """
+ Starts the authorization server on localhost. The ephemeral port in use will
+ be printed to stdout.
+ """
+
+ s = http.server.HTTPServer(("127.0.0.1", 0), OAuthHandler)
+
+ # Attach a "cache" dictionary to the server to allow the OAuthHandlers to
+ # track state across token requests. The use of defaultdict ensures that new
+ # entries will be created automatically.
+ class _TokenState:
+ retries = 0
+ min_delay = None
+ last_try = None
+
+ s.token_state = defaultdict(_TokenState)
+
+ # Give the parent the port number to contact (this is also the signal that
+ # we're ready to receive requests).
+ port = s.socket.getsockname()[1]
+ print(port)
+
+ # stdout is closed to allow the parent to just "read to the end".
+ stdout = sys.stdout.fileno()
+ sys.stdout.close()
+ os.close(stdout)
+
+ s.serve_forever() # we expect our parent to send a termination signal
+
+
+if __name__ == "__main__":
+ main()
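+
+# Illustrative only (not part of the test suite): a parent process might drive
+# this script roughly as follows, reading the pipe to EOF to pick up the port
+# once the server has closed its stdout:
+#
+#     proc = subprocess.Popen([sys.executable, path_to_this_script],
+#                             stdout=subprocess.PIPE)
+#     port = int(proc.stdout.read())
+#     ... issue HTTP requests against http://localhost:<port> ...
+#     proc.terminate()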
--- /dev/null
+/*-------------------------------------------------------------------------
+ *
+ * validator.c
+ * Test module for server-side OAuth token validation callbacks
+ *
+ * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/test/modules/oauth_validator/validator.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "fmgr.h"
+#include "libpq/oauth.h"
+#include "miscadmin.h"
+#include "utils/guc.h"
+#include "utils/memutils.h"
+
+PG_MODULE_MAGIC;
+
+static void validator_startup(ValidatorModuleState *state);
+static void validator_shutdown(ValidatorModuleState *state);
+static bool validate_token(const ValidatorModuleState *state,
+ const char *token,
+ const char *role,
+ ValidatorModuleResult *result);
+
+/* Callback implementations (exercise all three) */
+static const OAuthValidatorCallbacks validator_callbacks = {
+ PG_OAUTH_VALIDATOR_MAGIC,
+
+ .startup_cb = validator_startup,
+ .shutdown_cb = validator_shutdown,
+ .validate_cb = validate_token
+};
+
+/* GUCs */
+static char *authn_id = NULL;
+static bool authorize_tokens = true;
+
+/*---
+ * Extension entry point. Sets up GUCs for use by tests:
+ *
+ * - oauth_validator.authn_id Sets the user identifier to return during token
+ * validation. Defaults to the username in the
+ * startup packet.
+ *
+ * - oauth_validator.authorize_tokens
+ * Sets whether to successfully validate incoming
+ * tokens. Defaults to true.
+ */
+void
+_PG_init(void)
+{
+ DefineCustomStringVariable("oauth_validator.authn_id",
+ "Authenticated identity to use for future connections",
+ NULL,
+ &authn_id,
+ NULL,
+ PGC_SIGHUP,
+ 0,
+ NULL, NULL, NULL);
+ DefineCustomBoolVariable("oauth_validator.authorize_tokens",
+ "Should tokens be marked valid?",
+ NULL,
+ &authorize_tokens,
+ true,
+ PGC_SIGHUP,
+ 0,
+ NULL, NULL, NULL);
+
+ MarkGUCPrefixReserved("oauth_validator");
+}
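+
+/*
+ * Illustrative only: a test installation would typically load this module via
+ * oauth_validator_libraries and select the oauth auth method in pg_hba.conf,
+ * with settings along these lines (the names and values here are
+ * hypothetical):
+ *
+ *   postgresql.conf:  oauth_validator_libraries = 'validator'
+ *   pg_hba.conf:      local all all oauth issuer="https://127.0.0.1:56789" scope="openid postgres"
+ */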
+
+/*
+ * Validator module entry point.
+ */
+const OAuthValidatorCallbacks *
+_PG_oauth_validator_module_init(void)
+{
+ return &validator_callbacks;
+}
+
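+/*
+ * Arbitrary sentinel stored in the module's private_data by the startup
+ * callback, so that the later callbacks can check that the server preserved
+ * it.
+ */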
+#define PRIVATE_COOKIE ((void *) 13579)
+
+/*
+ * Startup callback, to set up private data for the validator.
+ */
+static void
+validator_startup(ValidatorModuleState *state)
+{
+ /*
+ * Make sure the server is correctly setting sversion. (Real modules should
+ * not insist on an exact version match like this; doing so would defeat
+ * upgrade compatibility.)
+ */
+ if (state->sversion != PG_VERSION_NUM)
+ elog(ERROR, "oauth_validator: sversion set to %d", state->sversion);
+
+ state->private_data = PRIVATE_COOKIE;
+}
+
+/*
+ * Shutdown callback, to tear down the validator.
+ */
+static void
+validator_shutdown(ValidatorModuleState *state)
+{
+ /* Check to make sure our private state still exists. */
+ if (state->private_data != PRIVATE_COOKIE)
+ elog(PANIC, "oauth_validator: private state cookie changed to %p in shutdown",
+ state->private_data);
+}
+
+/*
+ * Validator implementation. Logs the incoming data and authorizes the token by
+ * default; the behavior can be modified via the module's GUC settings.
+ */
+static bool
+validate_token(const ValidatorModuleState *state,
+ const char *token, const char *role,
+ ValidatorModuleResult *res)
+{
+ /* Check to make sure our private state still exists. */
+ if (state->private_data != PRIVATE_COOKIE)
+ elog(ERROR, "oauth_validator: private state cookie changed to %p in validate",
+ state->private_data);
+
+ elog(LOG, "oauth_validator: token=\"%s\", role=\"%s\"", token, role);
+ elog(LOG, "oauth_validator: issuer=\"%s\", scope=\"%s\"",
+ MyProcPort->hba->oauth_issuer,
+ MyProcPort->hba->oauth_scope);
+
+ res->authorized = authorize_tokens;
+ if (authn_id)
+ res->authn_id = pstrdup(authn_id);
+ else
+ res->authn_id = pstrdup(role);
+
+ return true;
+}
If this regular expression is set, matches it against the output generated.
+=item expected_stderr => B<value>
+
+If this regular expression is set, matches it against the standard error
+stream; otherwise stderr must be empty.
+
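+For example, a caller expecting a prompt or notice on the standard error
+stream might pass something like this (the pattern is purely illustrative):
+
+  expected_stderr => qr/enter the one-time code/
+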
=item log_like => [ qr/required message/ ]
=item log_unlike => [ qr/prohibited message/ ]
like($stdout, $params{expected_stdout}, "$test_name: stdout matches");
}
- is($stderr, "", "$test_name: no stderr");
+ if (defined($params{expected_stderr}))
+ {
+ if (like(
+ $stderr, $params{expected_stderr},
+ "$test_name: stderr matches")
+ && ($ret != 0))
+ {
+ # In this case (failing test but matching stderr) we'll have
+ # swallowed the output needed to debug. Put it back into the logs.
+ diag("$test_name: full stderr:\n" . $stderr);
+ }
+ }
+ else
+ {
+ is($stderr, "", "$test_name: no stderr");
+ }
$self->log_check($test_name, $log_location, %params);
}
# Protect wrapping in CATALOG()
$source =~ s!^(CATALOG\(.*)$!/*$1*/!gm;
+ # Treat a CURL_IGNORE_DEPRECATION() block as braces for the purposes of
+ # indentation. (The recursive regex comes from the perlre documentation; it
+ # matches balanced parentheses as group $1 and the contents as group $2.)
+ my $curlopen = '{ /* CURL_IGNORE_DEPRECATION */';
+ my $curlclose = '} /* CURL_IGNORE_DEPRECATION */';
+ $source =~
+ s!^[ \t]+CURL_IGNORE_DEPRECATION(\(((?:(?>[^()]+)|(?1))*)\))!$curlopen$2$curlclose!gms;
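+
+ # For example, schematically (pgindent reflows the indentation afterwards),
+ #     CURL_IGNORE_DEPRECATION(
+ #         something();
+ #     )
+ # becomes
+ #     { /* CURL_IGNORE_DEPRECATION */
+ #         something();
+ #     } /* CURL_IGNORE_DEPRECATION */
+ # and the markers are converted back to the macro once indentation is done.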
+
return $source;
}
$source =~ s!^/\* Open extern "C" \*/$!{!gm;
$source =~ s!^/\* Close extern "C" \*/$!}!gm;
+ # Restore the CURL_IGNORE_DEPRECATION() macro, keeping in mind that our
+ # markers may have been re-indented.
+ $source =~
+ s!{[ \t]+/\* CURL_IGNORE_DEPRECATION \*/!CURL_IGNORE_DEPRECATION(!gm;
+ $source =~ s!}[ \t]+/\* CURL_IGNORE_DEPRECATION \*/!)!gm;
+
## Comments
# Undo change of dash-protected block comments
CTECycleClause
CTEMaterialize
CTESearchClause
+CURL
+CURLM
+CURLoption
CV
CachedExpression
CachedPlan
NumericSortSupport
NumericSumAccum
NumericVar
+OAuthValidatorCallbacks
OM_uint32
OP
OSAPerGroupState
PG_Locale_Strategy
PG_Lock_Status
PG_init_t
+PGauthData
PGcancel
PGcancelConn
PGcmdQueueEntry
PGdataValue
PGlobjfuncs
PGnotify
+PGoauthBearerRequest
PGpipelineStatus
+PGpromptOAuthDevice
PGresAttDesc
PGresAttValue
PGresParamDesc
PQEnvironmentOption
PQExpBuffer
PQExpBufferData
+PQauthDataHook_type
PQcommMethods
PQconninfoOption
PQnoticeProcessor
VacuumStmt
ValidIOData
ValidateIndexState
+ValidatorModuleResult
+ValidatorModuleState
ValuesScan
ValuesScanState
Var
f_smgr
fasthash_state
fd_set
+fe_oauth_state
fe_scram_state
fe_scram_state_enum
fetch_range_request