diff --git a/NOTICE.txt b/NOTICE.txt
new file mode 100644
index 0000000..bb8b625
--- /dev/null
+++ b/NOTICE.txt
@@ -0,0 +1,8 @@
+This work integrates
+
+pgjdbc - https://github.com/pgjdbc/pgjdbc (as of 1 Apr 2024)
+stringprep - Stringprep (RFC 3454) Java implementation https://github.com/ongres/stringprep (as of 1 Apr 2024)
+saslprep - a profile of stringprep
+scram-client - SCRAM (RFC 5802) Java implementation https://github.com/ongres/scram (as of 1 Apr 2024)
+
+All of those projects were modified for Java 21+ with JPMS info.
diff --git a/gradle.properties b/gradle.properties
index dbb89c5..58fc89c 100644
--- a/gradle.properties
+++ b/gradle.properties
@@ -1,4 +1,3 @@
group = org.xbib.jdbc
name = pgjdbc
version = 42.7.4.0
-
diff --git a/gradle/test/junit5.gradle b/gradle/test/junit5.gradle
index 6cace6f..a974336 100644
--- a/gradle/test/junit5.gradle
+++ b/gradle/test/junit5.gradle
@@ -8,8 +8,8 @@ dependencies {
test {
useJUnitPlatform()
- failFast = true
- systemProperty 'java.util.logging.config.file', 'src/test/resources/logging.properties'
+ failFast = false
+ //systemProperty 'java.util.logging.config.file', 'src/test/resources/logging.properties'
testLogging {
events 'STARTED', 'PASSED', 'FAILED', 'SKIPPED'
showStandardStreams = true
diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar
index d64cd49..e644113 100644
Binary files a/gradle/wrapper/gradle-wrapper.jar and b/gradle/wrapper/gradle-wrapper.jar differ
diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties
index e6aba25..b82aa23 100644
--- a/gradle/wrapper/gradle-wrapper.properties
+++ b/gradle/wrapper/gradle-wrapper.properties
@@ -1,6 +1,6 @@
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
-distributionUrl=https\://services.gradle.org/distributions/gradle-8.5-all.zip
+distributionUrl=https\://services.gradle.org/distributions/gradle-8.7-bin.zip
networkTimeout=10000
validateDistributionUrl=true
zipStoreBase=GRADLE_USER_HOME
diff --git a/gradlew.bat b/gradlew.bat
index 6689b85..7101f8e 100644
--- a/gradlew.bat
+++ b/gradlew.bat
@@ -43,11 +43,11 @@ set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if %ERRORLEVEL% equ 0 goto execute
-echo.
-echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
-echo.
-echo Please set the JAVA_HOME variable in your environment to match the
-echo location of your Java installation.
+echo. 1>&2
+echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 1>&2
+echo. 1>&2
+echo Please set the JAVA_HOME variable in your environment to match the 1>&2
+echo location of your Java installation. 1>&2
goto fail
@@ -57,11 +57,11 @@ set JAVA_EXE=%JAVA_HOME%/bin/java.exe
if exist "%JAVA_EXE%" goto execute
-echo.
-echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
-echo.
-echo Please set the JAVA_HOME variable in your environment to match the
-echo location of your Java installation.
+echo. 1>&2
+echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 1>&2
+echo. 1>&2
+echo Please set the JAVA_HOME variable in your environment to match the 1>&2
+echo location of your Java installation. 1>&2
goto fail
diff --git a/pgjdbc/build.gradle b/pgjdbc/build.gradle
index a6e49ca..5e9edf8 100644
--- a/pgjdbc/build.gradle
+++ b/pgjdbc/build.gradle
@@ -1,3 +1,30 @@
dependencies {
api project(':scram-client')
+ testImplementation testLibs.junit.runner
+ testImplementation testLibs.junit.jupiter.engine
+ testImplementation testLibs.bytebuddy
+ testImplementation testLibs.bytebuddy.agent
+ testImplementation testLibs.classloader.leak.test
+ testImplementation testLibs.testcontainers
+ testImplementation testLibs.testcontainers.junit.jupiter
+ testImplementation testLibs.testcontainers.postgresql
+}
+
+test {
+ systemProperty 'username', 'test'
+ systemProperty 'server', 'localhost'
+ systemProperty 'port', '5432'
+ systemProperty 'secondaryServer1', 'localhost'
+ systemProperty 'secondaryPort1', '5433'
+ systemProperty 'secondaryServer2', 'localhost'
+ systemProperty 'secondaryPort2', '5434'
+ systemProperty 'database', 'test'
+ systemProperty 'username', 'test'
+ systemProperty 'password', 'test'
+ systemProperty 'privilegedUser', 'postgres'
+ systemProperty 'privilegedPassword', ''
+ systemProperty 'sspiusername', 'testsspi'
+ systemProperty 'preparethreshold', '5'
+ systemProperty 'protocolVersion', '0'
+ systemProperty 'sslpassword', 'sslpwd'
}
diff --git a/pgjdbc/src/main/java/org/postgresql/Driver.java b/pgjdbc/src/main/java/org/postgresql/Driver.java
index 2fac15d..e85c909 100644
--- a/pgjdbc/src/main/java/org/postgresql/Driver.java
+++ b/pgjdbc/src/main/java/org/postgresql/Driver.java
@@ -59,738 +59,736 @@ import java.util.logging.Logger;
@SuppressWarnings("try")
public class Driver implements java.sql.Driver {
- private static Driver registeredDriver;
- private static final Logger PARENT_LOGGER = Logger.getLogger("org.postgresql");
- private static final Logger LOGGER = Logger.getLogger("org.postgresql.Driver");
- private static final SharedTimer SHARED_TIMER = new SharedTimer();
+ private static final Logger PARENT_LOGGER = Logger.getLogger("org.postgresql");
+ private static final Logger LOGGER = Logger.getLogger("org.postgresql.Driver");
+ private static final SharedTimer SHARED_TIMER = new SharedTimer();
+ private static Driver registeredDriver;
- static {
- try {
- // moved the registerDriver from the constructor to here
- // because some clients call the driver themselves (I know, as
- // my early jdbc work did - and that was based on other examples).
- // Placing it here, means that the driver is registered once only.
- register();
- } catch (SQLException e) {
- throw new ExceptionInInitializerError(e);
- }
- }
-
- // Helper to retrieve default properties from classloader resource
- // properties files.
- private Properties defaultProperties;
-
- private final ResourceLock lock = new ResourceLock();
-
- public Driver() {
- }
-
- private Properties getDefaultProperties() throws IOException {
- try (ResourceLock ignore = lock.obtain()) {
- if (defaultProperties != null) {
- return defaultProperties;
- }
-
- // Make sure we load properties with the maximum possible privileges.
- try {
- defaultProperties =
- doPrivileged(new PrivilegedExceptionAction() {
- @Override
- public Properties run() throws IOException {
- return loadDefaultProperties();
- }
- });
- } catch (PrivilegedActionException e) {
- Exception ex = e.getException();
- if (ex instanceof IOException) {
- throw (IOException) ex;
+ static {
+ try {
+ // moved the registerDriver from the constructor to here
+ // because some clients call the driver themselves (I know, as
+ // my early jdbc work did - and that was based on other examples).
+ // Placing it here, means that the driver is registered once only.
+ register();
+ } catch (SQLException e) {
+ throw new ExceptionInInitializerError(e);
}
- throw new RuntimeException(e);
- } catch (Throwable e) {
- if (e instanceof IOException) {
- throw (IOException) e;
- }
- if (e instanceof RuntimeException) {
- throw (RuntimeException) e;
- }
- if (e instanceof Error) {
- throw (Error) e;
- }
- throw new RuntimeException(e);
- }
-
- return defaultProperties;
- }
- }
-
- @SuppressWarnings("unchecked")
- private static T doPrivileged(PrivilegedExceptionAction action) throws Throwable {
- try {
- Class> accessControllerClass = Class.forName("java.security.AccessController");
- Method doPrivileged = accessControllerClass.getMethod("doPrivileged",
- PrivilegedExceptionAction.class);
- return (T) doPrivileged.invoke(null, action);
- } catch (ClassNotFoundException e) {
- return action.run();
- } catch (InvocationTargetException e) {
- throw e.getCause();
- }
- }
-
- private Properties loadDefaultProperties() throws IOException {
- Properties merged = new Properties();
-
- try {
- PGProperty.USER.set(merged, System.getProperty("user.name"));
- } catch (SecurityException se) {
- // We're just trying to set a default, so if we can't
- // it's not a big deal.
}
- // If we are loaded by the bootstrap classloader, getClassLoader()
- // may return null. In that case, try to fall back to the system
- // classloader.
- //
- // We should not need to catch SecurityException here as we are
- // accessing either our own classloader, or the system classloader
- // when our classloader is null. The ClassLoader javadoc claims
- // neither case can throw SecurityException.
- ClassLoader cl = getClass().getClassLoader();
- if (cl == null) {
- LOGGER.log(Level.FINE, "Can't find our classloader for the Driver; "
- + "attempt to use the system class loader");
- cl = ClassLoader.getSystemClassLoader();
- }
-
- if (cl == null) {
- LOGGER.log(Level.WARNING, "Can't find a classloader for the Driver; not loading driver "
- + "configuration from org/postgresql/driverconfig.properties");
- return merged; // Give up on finding defaults.
- }
-
- LOGGER.log(Level.FINE, "Loading driver configuration via classloader {0}", cl);
-
- // When loading the driver config files we don't want settings found
- // in later files in the classpath to override settings specified in
- // earlier files. To do this we've got to read the returned
- // Enumeration into temporary storage.
- ArrayList urls = new ArrayList<>();
- Enumeration urlEnum = cl.getResources("org/postgresql/driverconfig.properties");
- while (urlEnum.hasMoreElements()) {
- urls.add(urlEnum.nextElement());
- }
-
- for (int i = urls.size() - 1; i >= 0; i--) {
- URL url = urls.get(i);
- LOGGER.log(Level.FINE, "Loading driver configuration from: {0}", url);
- InputStream is = url.openStream();
- merged.load(is);
- is.close();
- }
-
- return merged;
- }
-
- /**
- *
Try to make a database connection to the given URL. The driver should return "null" if it
- * realizes it is the wrong kind of driver to connect to the given URL. This will be common, as
- * when the JDBC driverManager is asked to connect to a given URL, it passes the URL to each
- * loaded driver in turn.
- *
- *
The driver should raise an SQLException if it is the right driver to connect to the given URL,
- * but has trouble connecting to the database.
- *
- *
The java.util.Properties argument can be used to pass arbitrary string tag/value pairs as
- * connection arguments.
- *
- *
- *
user - (required) The user to connect as
- *
password - (optional) The password for the user
- *
ssl -(optional) Use SSL when connecting to the server
- *
readOnly - (optional) Set connection to read-only by default
- *
charSet - (optional) The character set to be used for converting to/from
- * the database to unicode. If multibyte is enabled on the server then the character set of the
- * database is used as the default, otherwise the jvm character encoding is used as the default.
- * This value is only used when connecting to a 7.2 or older server.
- *
loglevel - (optional) Enable logging of messages from the driver. The value is an integer
- * from 0 to 2 where: OFF = 0, INFO =1, DEBUG = 2 The output is sent to
- * DriverManager.getPrintWriter() if set, otherwise it is sent to System.out.
- *
compatible - (optional) This is used to toggle between different functionality
- * as it changes across different releases of the jdbc driver code. The values here are versions
- * of the jdbc client and not server versions. For example in 7.1 get/setBytes worked on
- * LargeObject values, in 7.2 these methods were changed to work on bytea values. This change in
- * functionality could be disabled by setting the compatible level to be "7.1", in which case the
- * driver will revert to the 7.1 functionality.
- *
- *
- *
Normally, at least "user" and "password" properties should be included in the properties. For a
- * list of supported character encoding , see
- * http://java.sun.com/products/jdk/1.2/docs/guide/internat/encoding.doc.html Note that you will
- * probably want to have set up the Postgres database itself to use the same encoding, with the
- * {@code -E } argument to createdb.
- *
- * @param url the URL of the database to connect to
- * @param info a list of arbitrary tag/value pairs as connection arguments
- * @return a connection to the URL or null if it isnt us
- * @exception SQLException if a database access error occurs or the url is
- * {@code null}
- * @see java.sql.Driver#connect
- */
- @Override
- public Connection connect(String url, Properties info) throws SQLException {
- if (url == null) {
- throw new SQLException("url is null");
- }
- // get defaults
- Properties defaults;
-
- if (!url.startsWith("jdbc:postgresql:")) {
- return null;
- }
- try {
- defaults = getDefaultProperties();
- } catch (IOException ioe) {
- throw new PSQLException(GT.tr("Error loading default settings from driverconfig.properties"),
- PSQLState.UNEXPECTED_ERROR, ioe);
- }
-
- // override defaults with provided properties
- Properties props = new Properties(defaults);
- if (info != null) {
- Set e = info.stringPropertyNames();
- for (String propName : e) {
- String propValue = info.getProperty(propName);
- if (propValue == null) {
- throw new PSQLException(
- GT.tr("Properties for the driver contains a non-string value for the key ")
- + propName,
- PSQLState.UNEXPECTED_ERROR);
- }
- props.setProperty(propName, propValue);
- }
- }
- // parse URL and add more properties
- if ((props = parseURL(url, props)) == null) {
- throw new PSQLException(
- GT.tr("Unable to parse URL {0}", url),
- PSQLState.UNEXPECTED_ERROR);
- }
- try {
-
- LOGGER.log(Level.FINE, "Connecting with URL: {0}", url);
-
- // Enforce login timeout, if specified, by running the connection
- // attempt in a separate thread. If we hit the timeout without the
- // connection completing, we abandon the connection attempt in
- // the calling thread, but the separate thread will keep trying.
- // Eventually, the separate thread will either fail or complete
- // the connection; at that point we clean up the connection if
- // we managed to establish one after all. See ConnectThread for
- // more details.
- long timeout = timeout(props);
- if (timeout <= 0) {
- return makeConnection(url, props);
- }
-
- ConnectThread ct = new ConnectThread(url, props);
- Thread thread = new Thread(ct, "PostgreSQL JDBC driver connection thread");
- thread.setDaemon(true); // Don't prevent the VM from shutting down
- thread.start();
- return ct.getResult(timeout);
- } catch (PSQLException ex1) {
- LOGGER.log(Level.FINE, "Connection error: ", ex1);
- // re-throw the exception, otherwise it will be caught next, and a
- // org.postgresql.unusual error will be returned instead.
- throw ex1;
- } catch (Exception ex2) {
- if ("java.security.AccessControlException".equals(ex2.getClass().getName())) {
- // java.security.AccessControlException has been deprecated for removal, so compare the class name
- throw new PSQLException(
- GT.tr(
- "Your security policy has prevented the connection from being attempted. You probably need to grant the connect java.net.SocketPermission to the database server host and port that you wish to connect to."),
- PSQLState.UNEXPECTED_ERROR, ex2);
- }
- LOGGER.log(Level.FINE, "Unexpected connection error: ", ex2);
- throw new PSQLException(
- GT.tr(
- "Something unusual has occurred to cause the driver to fail. Please report this exception."),
- PSQLState.UNEXPECTED_ERROR, ex2);
- }
- }
-
- /**
- * this is an empty method left here for graalvm
- * we removed the ability to setup the logger from properties
- * due to a security issue
- * @param props Connection Properties
- */
- private void setupLoggerFromProperties(final Properties props) {
- }
-
- /**
- * Perform a connect in a separate thread; supports getting the results from the original thread
- * while enforcing a login timeout.
- */
- private static class ConnectThread implements Runnable {
private final ResourceLock lock = new ResourceLock();
- private final Condition lockCondition = lock.newCondition();
+ // Helper to retrieve default properties from classloader resource
+ // properties files.
+ private Properties defaultProperties;
- ConnectThread(String url, Properties props) {
- this.url = url;
- this.props = props;
+ public Driver() {
}
- @Override
- public void run() {
- Connection conn;
- Throwable error;
-
- try {
- conn = makeConnection(url, props);
- error = null;
- } catch (Throwable t) {
- conn = null;
- error = t;
- }
-
- try (ResourceLock ignore = lock.obtain()) {
- if (abandoned) {
- if (conn != null) {
- try {
- conn.close();
- } catch (SQLException e) {
- }
- }
- } else {
- result = conn;
- resultException = error;
- lockCondition.signal();
+ @SuppressWarnings("unchecked")
+ private static T doPrivileged(PrivilegedExceptionAction action) throws Throwable {
+ try {
+ Class> accessControllerClass = Class.forName("java.security.AccessController");
+ Method doPrivileged = accessControllerClass.getMethod("doPrivileged",
+ PrivilegedExceptionAction.class);
+ return (T) doPrivileged.invoke(null, action);
+ } catch (ClassNotFoundException e) {
+ return action.run();
+ } catch (InvocationTargetException e) {
+ throw e.getCause();
}
- }
}
/**
- * Get the connection result from this (assumed running) thread. If the timeout is reached
- * without a result being available, a SQLException is thrown.
+ * Create a connection from URL and properties. Always does the connection work in the current
+ * thread without enforcing a timeout, regardless of any timeout specified in the properties.
*
- * @param timeout timeout in milliseconds
- * @return the new connection, if successful
- * @throws SQLException if a connection error occurs or the timeout is reached
+ * @param url the original URL
+ * @param props the parsed/defaulted connection properties
+ * @return a new connection
+ * @throws SQLException if the connection could not be made
*/
- public Connection getResult(long timeout) throws SQLException {
- long expiry = TimeUnit.NANOSECONDS.toMillis(System.nanoTime()) + timeout;
- try (ResourceLock ignore = lock.obtain()) {
- while (true) {
- if (result != null) {
- return result;
- }
+ private static Connection makeConnection(String url, Properties props) throws SQLException {
+ return new PgConnection(hostSpecs(props), props, url);
+ }
- Throwable resultException = this.resultException;
- if (resultException != null) {
- if (resultException instanceof SQLException) {
- resultException.fillInStackTrace();
- throw (SQLException) resultException;
- } else {
- throw new PSQLException(
- GT.tr(
- "Something unusual has occurred to cause the driver to fail. Please report this exception."),
- PSQLState.UNEXPECTED_ERROR, resultException);
+ /**
+ * Returns the server version series of this driver and the specific build number.
+ *
+ * @return JDBC driver version
+ * @deprecated use {@link #getMajorVersion()} and {@link #getMinorVersion()} instead
+ */
+ @Deprecated
+ public static String getVersion() {
+ return DriverInfo.DRIVER_FULL_NAME;
+ }
+
+ /**
+ * Constructs a new DriverURL, splitting the specified URL into its component parts.
+ *
+ * @param url JDBC URL to parse
+ * @param defaults Default properties
+ * @return Properties with elements added from the url
+ */
+ public static Properties parseURL(String url, Properties defaults) {
+ // priority 1 - URL values
+ Properties priority1Url = new Properties();
+ // priority 2 - Properties given as argument to DriverManager.getConnection()
+ // argument "defaults" EXCLUDING defaults
+ // priority 3 - Values retrieved by "service"
+ Properties priority3Service = new Properties();
+ // priority 4 - Properties loaded by Driver.loadDefaultProperties() (user, org/postgresql/driverconfig.properties)
+ // argument "defaults" INCLUDING defaults
+ // priority 5 - PGProperty defaults for PGHOST, PGPORT, PGDBNAME
+
+ String urlServer = url;
+ String urlArgs = "";
+
+ int qPos = url.indexOf('?');
+ if (qPos != -1) {
+ urlServer = url.substring(0, qPos);
+ urlArgs = url.substring(qPos + 1);
+ }
+
+ if (!urlServer.startsWith("jdbc:postgresql:")) {
+ LOGGER.log(Level.FINE, "JDBC URL must start with \"jdbc:postgresql:\" but was: {0}", url);
+ return null;
+ }
+ urlServer = urlServer.substring("jdbc:postgresql:".length());
+
+ if ("//".equals(urlServer) || "///".equals(urlServer)) {
+ urlServer = "";
+ } else if (urlServer.startsWith("//")) {
+ urlServer = urlServer.substring(2);
+ long slashCount = urlServer.chars().filter(ch -> ch == '/').count();
+ if (slashCount > 1) {
+ LOGGER.log(Level.WARNING, "JDBC URL contains too many / characters: {0}", url);
+ return null;
}
- }
+ int slash = urlServer.indexOf('/');
+ if (slash == -1) {
+ LOGGER.log(Level.WARNING, "JDBC URL must contain a / at the end of the host or port: {0}", url);
+ return null;
+ }
+ if (!urlServer.endsWith("/")) {
+ String value = urlDecode(urlServer.substring(slash + 1));
+ if (value == null) {
+ return null;
+ }
+ PGProperty.PG_DBNAME.set(priority1Url, value);
+ }
+ urlServer = urlServer.substring(0, slash);
- long delay = expiry - TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
- if (delay <= 0) {
- abandoned = true;
- throw new PSQLException(GT.tr("Connection attempt timed out."),
- PSQLState.CONNECTION_UNABLE_TO_CONNECT);
- }
-
- try {
- lockCondition.await(delay, TimeUnit.MILLISECONDS);
- } catch (InterruptedException ie) {
-
- // reset the interrupt flag
- Thread.currentThread().interrupt();
- abandoned = true;
-
- // throw an unchecked exception which will hopefully not be ignored by the calling code
- throw new RuntimeException(GT.tr("Interrupted while attempting to connect."));
- }
- }
- }
- }
-
- private final String url;
- private final Properties props;
- private Connection result;
- private Throwable resultException;
- private boolean abandoned;
- }
-
- /**
- * Create a connection from URL and properties. Always does the connection work in the current
- * thread without enforcing a timeout, regardless of any timeout specified in the properties.
- *
- * @param url the original URL
- * @param props the parsed/defaulted connection properties
- * @return a new connection
- * @throws SQLException if the connection could not be made
- */
- private static Connection makeConnection(String url, Properties props) throws SQLException {
- return new PgConnection(hostSpecs(props), props, url);
- }
-
- /**
- * Returns true if the driver thinks it can open a connection to the given URL. Typically, drivers
- * will return true if they understand the subprotocol specified in the URL and false if they
- * don't. Our protocols start with jdbc:postgresql:
- *
- * @param url the URL of the driver
- * @return true if this driver accepts the given URL
- * @see java.sql.Driver#acceptsURL
- */
- @Override
- public boolean acceptsURL(String url) {
- return parseURL(url, null) != null;
- }
-
- /**
- *
The getPropertyInfo method is intended to allow a generic GUI tool to discover what properties
- * it should prompt a human for in order to get enough information to connect to a database.
- *
- *
Note that depending on the values the human has supplied so far, additional values may become
- * necessary, so it may be necessary to iterate through several calls to getPropertyInfo
- *
- * @param url the Url of the database to connect to
- * @param info a proposed list of tag/value pairs that will be sent on connect open.
- * @return An array of DriverPropertyInfo objects describing possible properties. This array may
- * be an empty array if no properties are required
- * @see java.sql.Driver#getPropertyInfo
- */
- @Override
- public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) {
- Properties copy = new Properties(info);
- Properties parse = parseURL(url, copy);
- if (parse != null) {
- copy = parse;
- }
-
- PGProperty[] knownProperties = PGProperty.values();
- DriverPropertyInfo[] props = new DriverPropertyInfo[knownProperties.length];
- for (int i = 0; i < props.length; i++) {
- props[i] = knownProperties[i].toDriverPropertyInfo(copy);
- }
-
- return props;
- }
-
- @Override
- public int getMajorVersion() {
- return DriverInfo.MAJOR_VERSION;
- }
-
- @Override
- public int getMinorVersion() {
- return DriverInfo.MINOR_VERSION;
- }
-
- /**
- * Returns the server version series of this driver and the specific build number.
- *
- * @return JDBC driver version
- * @deprecated use {@link #getMajorVersion()} and {@link #getMinorVersion()} instead
- */
- @Deprecated
- public static String getVersion() {
- return DriverInfo.DRIVER_FULL_NAME;
- }
-
- /**
- *
Report whether the driver is a genuine JDBC compliant driver. A driver may only report "true"
- * here if it passes the JDBC compliance tests, otherwise it is required to return false. JDBC
- * compliance requires full support for the JDBC API and full support for SQL 92 Entry Level.
- *
- *
For PostgreSQL, this is not yet possible, as we are not SQL92 compliant (yet).
- */
- @Override
- public boolean jdbcCompliant() {
- return false;
- }
-
- /**
- * Constructs a new DriverURL, splitting the specified URL into its component parts.
- *
- * @param url JDBC URL to parse
- * @param defaults Default properties
- * @return Properties with elements added from the url
- */
- public static Properties parseURL(String url, Properties defaults) {
- // priority 1 - URL values
- Properties priority1Url = new Properties();
- // priority 2 - Properties given as argument to DriverManager.getConnection()
- // argument "defaults" EXCLUDING defaults
- // priority 3 - Values retrieved by "service"
- Properties priority3Service = new Properties();
- // priority 4 - Properties loaded by Driver.loadDefaultProperties() (user, org/postgresql/driverconfig.properties)
- // argument "defaults" INCLUDING defaults
- // priority 5 - PGProperty defaults for PGHOST, PGPORT, PGDBNAME
-
- String urlServer = url;
- String urlArgs = "";
-
- int qPos = url.indexOf('?');
- if (qPos != -1) {
- urlServer = url.substring(0, qPos);
- urlArgs = url.substring(qPos + 1);
- }
-
- if (!urlServer.startsWith("jdbc:postgresql:")) {
- LOGGER.log(Level.FINE, "JDBC URL must start with \"jdbc:postgresql:\" but was: {0}", url);
- return null;
- }
- urlServer = urlServer.substring("jdbc:postgresql:".length());
-
- if ("//".equals(urlServer) || "///".equals(urlServer)) {
- urlServer = "";
- } else if (urlServer.startsWith("//")) {
- urlServer = urlServer.substring(2);
- long slashCount = urlServer.chars().filter(ch -> ch == '/').count();
- if (slashCount > 1) {
- LOGGER.log(Level.WARNING, "JDBC URL contains too many / characters: {0}", url);
- return null;
- }
- int slash = urlServer.indexOf('/');
- if (slash == -1) {
- LOGGER.log(Level.WARNING, "JDBC URL must contain a / at the end of the host or port: {0}", url);
- return null;
- }
- if (!urlServer.endsWith("/")) {
- String value = urlDecode(urlServer.substring(slash + 1));
- if (value == null) {
- return null;
- }
- PGProperty.PG_DBNAME.set(priority1Url, value);
- }
- urlServer = urlServer.substring(0, slash);
-
- String[] addresses = urlServer.split(",");
- StringBuilder hosts = new StringBuilder();
- StringBuilder ports = new StringBuilder();
- for (String address : addresses) {
- int portIdx = address.lastIndexOf(':');
- if (portIdx != -1 && address.lastIndexOf(']') < portIdx) {
- String portStr = address.substring(portIdx + 1);
- ports.append(portStr);
- CharSequence hostStr = address.subSequence(0, portIdx);
- if (hostStr.length() == 0) {
- hosts.append(PGProperty.PG_HOST.getDefaultValue());
- } else {
- hosts.append(hostStr);
- }
+ String[] addresses = urlServer.split(",");
+ StringBuilder hosts = new StringBuilder();
+ StringBuilder ports = new StringBuilder();
+ for (String address : addresses) {
+ int portIdx = address.lastIndexOf(':');
+ if (portIdx != -1 && address.lastIndexOf(']') < portIdx) {
+ String portStr = address.substring(portIdx + 1);
+ ports.append(portStr);
+ CharSequence hostStr = address.subSequence(0, portIdx);
+ if (hostStr.length() == 0) {
+ hosts.append(PGProperty.PG_HOST.getDefaultValue());
+ } else {
+ hosts.append(hostStr);
+ }
+ } else {
+ ports.append(PGProperty.PG_PORT.getDefaultValue());
+ hosts.append(address);
+ }
+ ports.append(',');
+ hosts.append(',');
+ }
+ ports.setLength(ports.length() - 1);
+ hosts.setLength(hosts.length() - 1);
+ PGProperty.PG_HOST.set(priority1Url, hosts.toString());
+ PGProperty.PG_PORT.set(priority1Url, ports.toString());
+ } else if (urlServer.startsWith("/")) {
+ return null;
} else {
- ports.append(PGProperty.PG_PORT.getDefaultValue());
- hosts.append(address);
+ String value = urlDecode(urlServer);
+ if (value == null) {
+ return null;
+ }
+ priority1Url.setProperty(PGProperty.PG_DBNAME.getName(), value);
+ }
+
+ // parse the args part of the url
+ String[] args = urlArgs.split("&");
+ String serviceName = null;
+ for (String token : args) {
+ if (token.isEmpty()) {
+ continue;
+ }
+ int pos = token.indexOf('=');
+ if (pos == -1) {
+ priority1Url.setProperty(token, "");
+ } else {
+ String pName = PGPropertyUtil.translatePGServiceToPGProperty(token.substring(0, pos));
+ String pValue = urlDecode(token.substring(pos + 1));
+ if (pValue == null) {
+ return null;
+ }
+ if (PGProperty.SERVICE.getName().equals(pName)) {
+ serviceName = pValue;
+ } else {
+ priority1Url.setProperty(pName, pValue);
+ }
+ }
+ }
+
+ // load pg_service.conf
+ if (serviceName != null) {
+ LOGGER.log(Level.FINE, "Processing option [?service={0}]", serviceName);
+ Properties result = PgServiceConfParser.getServiceProperties(serviceName);
+ if (result == null) {
+ LOGGER.log(Level.WARNING, "Definition of service [{0}] not found", serviceName);
+ return null;
+ }
+ priority3Service.putAll(result);
+ }
+
+ // combine result based on order of priority
+ Properties result = new Properties();
+ result.putAll(priority1Url);
+ if (defaults != null) {
+ // priority 2 - forEach() returns all entries EXCEPT defaults
+ defaults.forEach(result::putIfAbsent);
+ }
+ priority3Service.forEach(result::putIfAbsent);
+ if (defaults != null) {
+ // priority 4 - stringPropertyNames() returns all entries INCLUDING defaults
+ defaults.stringPropertyNames().forEach(s -> result.putIfAbsent(s, defaults.getProperty(s)));
+ }
+ // priority 5 - PGProperty defaults for PGHOST, PGPORT, PGDBNAME
+ result.putIfAbsent(PGProperty.PG_PORT.getName(), PGProperty.PG_PORT.getDefaultValue());
+ result.putIfAbsent(PGProperty.PG_HOST.getName(), PGProperty.PG_HOST.getDefaultValue());
+ if (PGProperty.USER.getOrDefault(result) != null) {
+ result.putIfAbsent(PGProperty.PG_DBNAME.getName(), PGProperty.USER.getOrDefault(result));
+ }
+
+ // consistency check
+ if (!PGPropertyUtil.propertiesConsistencyCheck(result)) {
+ return null;
+ }
+
+ // try to load .pgpass if password is missing
+ if (PGProperty.PASSWORD.getOrDefault(result) == null) {
+ String password = PgPassParser.getPassword(
+ PGProperty.PG_HOST.getOrDefault(result), PGProperty.PG_PORT.getOrDefault(result), PGProperty.PG_DBNAME.getOrDefault(result), PGProperty.USER.getOrDefault(result)
+ );
+ if (password != null && !password.isEmpty()) {
+ PGProperty.PASSWORD.set(result, password);
+ }
+ }
+ //
+ return result;
+ }
+
+ // decode url, on failure log and return null
+ private static String urlDecode(String url) {
+ try {
+ return URLCoder.decode(url);
+ } catch (IllegalArgumentException e) {
+ LOGGER.log(Level.FINE, "Url [{0}] parsing failed with error [{1}]", new Object[]{url, e.getMessage()});
}
- ports.append(',');
- hosts.append(',');
- }
- ports.setLength(ports.length() - 1);
- hosts.setLength(hosts.length() - 1);
- PGProperty.PG_HOST.set(priority1Url, hosts.toString());
- PGProperty.PG_PORT.set(priority1Url, ports.toString());
- } else if (urlServer.startsWith("/")) {
- return null;
- } else {
- String value = urlDecode(urlServer);
- if (value == null) {
return null;
- }
- priority1Url.setProperty(PGProperty.PG_DBNAME.getName(), value);
}
- // parse the args part of the url
- String[] args = urlArgs.split("&");
- String serviceName = null;
- for (String token : args) {
- if (token.isEmpty()) {
- continue;
- }
- int pos = token.indexOf('=');
- if (pos == -1) {
- priority1Url.setProperty(token, "");
- } else {
- String pName = PGPropertyUtil.translatePGServiceToPGProperty(token.substring(0, pos));
- String pValue = urlDecode(token.substring(pos + 1));
- if (pValue == null) {
- return null;
+ /**
+ * @return the address portion of the URL
+ */
+ private static HostSpec[] hostSpecs(Properties props) {
+ String[] hosts = PGProperty.PG_HOST.getOrDefault(props).split(",");
+ String[] ports = PGProperty.PG_PORT.getOrDefault(props).split(",");
+ String localSocketAddress = PGProperty.LOCAL_SOCKET_ADDRESS.getOrDefault(props);
+ HostSpec[] hostSpecs = new HostSpec[hosts.length];
+ for (int i = 0; i < hostSpecs.length; i++) {
+ hostSpecs[i] = new HostSpec(hosts[i], Integer.parseInt(ports[i]), localSocketAddress);
}
- if (PGProperty.SERVICE.getName().equals(pName)) {
- serviceName = pValue;
- } else {
- priority1Url.setProperty(pName, pValue);
+ return hostSpecs;
+ }
+
+ /**
+ * @return the timeout from the URL, in milliseconds
+ */
+ private static long timeout(Properties props) {
+ String timeout = PGProperty.LOGIN_TIMEOUT.getOrDefault(props);
+ if (timeout != null) {
+ try {
+ return (long) (Float.parseFloat(timeout) * 1000);
+ } catch (NumberFormatException e) {
+ LOGGER.log(Level.WARNING, "Couldn't parse loginTimeout value: {0}", timeout);
+ }
}
- }
+ return (long) DriverManager.getLoginTimeout() * 1000;
}
- // load pg_service.conf
- if (serviceName != null) {
- LOGGER.log(Level.FINE, "Processing option [?service={0}]", serviceName);
- Properties result = PgServiceConfParser.getServiceProperties(serviceName);
- if (result == null) {
- LOGGER.log(Level.WARNING, "Definition of service [{0}] not found", serviceName);
- return null;
- }
- priority3Service.putAll(result);
+ /**
+ * This method was added in v6.5, and simply throws an SQLException for an unimplemented method. I
+ * decided to do it this way while implementing the JDBC2 extensions to JDBC, as it should help
+ * keep the overall driver size down. It now requires the call Class and the function name to help
+ * when the driver is used with closed software that don't report the stack trace
+ *
+ * @param callClass the call Class
+ * @param functionName the name of the unimplemented function with the type of its arguments
+ * @return PSQLException with a localized message giving the complete description of the
+ * unimplemented function
+ */
+ public static SQLFeatureNotSupportedException notImplemented(Class<?> callClass,
+ String functionName) {
+ return new SQLFeatureNotSupportedException(
+ GT.tr("Method {0} is not yet implemented.", callClass.getName() + "." + functionName),
+ PSQLState.NOT_IMPLEMENTED.getState());
}
- // combine result based on order of priority
- Properties result = new Properties();
- result.putAll(priority1Url);
- if (defaults != null) {
- // priority 2 - forEach() returns all entries EXCEPT defaults
- defaults.forEach(result::putIfAbsent);
- }
- priority3Service.forEach(result::putIfAbsent);
- if (defaults != null) {
- // priority 4 - stringPropertyNames() returns all entries INCLUDING defaults
- defaults.stringPropertyNames().forEach(s -> result.putIfAbsent(s, defaults.getProperty(s)));
- }
- // priority 5 - PGProperty defaults for PGHOST, PGPORT, PGDBNAME
- result.putIfAbsent(PGProperty.PG_PORT.getName(), PGProperty.PG_PORT.getDefaultValue());
- result.putIfAbsent(PGProperty.PG_HOST.getName(), PGProperty.PG_HOST.getDefaultValue());
- if (PGProperty.USER.getOrDefault(result) != null) {
- result.putIfAbsent(PGProperty.PG_DBNAME.getName(), PGProperty.USER.getOrDefault(result));
+ public static SharedTimer getSharedTimer() {
+ return SHARED_TIMER;
}
- // consistency check
- if (!PGPropertyUtil.propertiesConsistencyCheck(result)) {
- return null;
+ /**
+ * Register the driver against {@link DriverManager}. This is done automatically when the class is
+ * loaded. Dropping the driver from DriverManager's list is possible using {@link #deregister()}
+ * method.
+ *
+ * @throws IllegalStateException if the driver is already registered
+ * @throws SQLException if registering the driver fails
+ */
+ public static void register() throws SQLException {
+ if (isRegistered()) {
+ throw new IllegalStateException(
+ "Driver is already registered. It can only be registered once.");
+ }
+ Driver registeredDriver = new Driver();
+ DriverManager.registerDriver(registeredDriver);
+ Driver.registeredDriver = registeredDriver;
}
- // try to load .pgpass if password is missing
- if (PGProperty.PASSWORD.getOrDefault(result) == null) {
- String password = PgPassParser.getPassword(
- PGProperty.PG_HOST.getOrDefault(result), PGProperty.PG_PORT.getOrDefault(result), PGProperty.PG_DBNAME.getOrDefault(result), PGProperty.USER.getOrDefault(result)
- );
- if (password != null && !password.isEmpty()) {
- PGProperty.PASSWORD.set(result, password);
- }
+ /**
+ * According to JDBC specification, this driver is registered against {@link DriverManager} when
+ * the class is loaded. To avoid leaks, this method allow unregistering the driver so that the
+ * class can be gc'ed if necessary.
+ *
+ * @throws IllegalStateException if the driver is not registered
+ * @throws SQLException if deregistering the driver fails
+ */
+ public static void deregister() throws SQLException {
+ if (registeredDriver == null) {
+ throw new IllegalStateException(
+ "Driver is not registered (or it has not been registered using Driver.register() method)");
+ }
+ DriverManager.deregisterDriver(registeredDriver);
+ registeredDriver = null;
}
- //
- return result;
- }
- // decode url, on failure log and return null
- private static String urlDecode(String url) {
- try {
- return URLCoder.decode(url);
- } catch (IllegalArgumentException e) {
- LOGGER.log(Level.FINE, "Url [{0}] parsing failed with error [{1}]", new Object[]{url, e.getMessage()});
+ /**
+ * @return {@code true} if the driver is registered against {@link DriverManager}
+ */
+ public static boolean isRegistered() {
+ return registeredDriver != null;
}
- return null;
- }
- /**
- * @return the address portion of the URL
- */
- private static HostSpec[] hostSpecs(Properties props) {
- String[] hosts = PGProperty.PG_HOST.getOrDefault(props).split(",");
- String[] ports = PGProperty.PG_PORT.getOrDefault(props).split(",");
- String localSocketAddress = PGProperty.LOCAL_SOCKET_ADDRESS.getOrDefault(props);
- HostSpec[] hostSpecs = new HostSpec[hosts.length];
- for (int i = 0; i < hostSpecs.length; i++) {
- hostSpecs[i] = new HostSpec(hosts[i], Integer.parseInt(ports[i]), localSocketAddress);
+ private Properties getDefaultProperties() throws IOException {
+ try (ResourceLock ignore = lock.obtain()) {
+ if (defaultProperties != null) {
+ return defaultProperties;
+ }
+
+ // Make sure we load properties with the maximum possible privileges.
+ try {
+ defaultProperties =
+ doPrivileged(new PrivilegedExceptionAction<Properties>() {
+ @Override
+ public Properties run() throws IOException {
+ return loadDefaultProperties();
+ }
+ });
+ } catch (PrivilegedActionException e) {
+ Exception ex = e.getException();
+ if (ex instanceof IOException) {
+ throw (IOException) ex;
+ }
+ throw new RuntimeException(e);
+ } catch (Throwable e) {
+ if (e instanceof IOException) {
+ throw (IOException) e;
+ }
+ if (e instanceof RuntimeException) {
+ throw (RuntimeException) e;
+ }
+ if (e instanceof Error) {
+ throw (Error) e;
+ }
+ throw new RuntimeException(e);
+ }
+
+ return defaultProperties;
+ }
}
- return hostSpecs;
- }
- /**
- * @return the timeout from the URL, in milliseconds
- */
- private static long timeout(Properties props) {
- String timeout = PGProperty.LOGIN_TIMEOUT.getOrDefault(props);
- if (timeout != null) {
- try {
- return (long) (Float.parseFloat(timeout) * 1000);
- } catch (NumberFormatException e) {
- LOGGER.log(Level.WARNING, "Couldn't parse loginTimeout value: {0}", timeout);
- }
+ private Properties loadDefaultProperties() throws IOException {
+ Properties merged = new Properties();
+
+ try {
+ PGProperty.USER.set(merged, System.getProperty("user.name"));
+ } catch (SecurityException se) {
+ // We're just trying to set a default, so if we can't
+ // it's not a big deal.
+ }
+
+ // If we are loaded by the bootstrap classloader, getClassLoader()
+ // may return null. In that case, try to fall back to the system
+ // classloader.
+ //
+ // We should not need to catch SecurityException here as we are
+ // accessing either our own classloader, or the system classloader
+ // when our classloader is null. The ClassLoader javadoc claims
+ // neither case can throw SecurityException.
+ ClassLoader cl = getClass().getClassLoader();
+ if (cl == null) {
+ LOGGER.log(Level.FINE, "Can't find our classloader for the Driver; "
+ + "attempt to use the system class loader");
+ cl = ClassLoader.getSystemClassLoader();
+ }
+
+ if (cl == null) {
+ LOGGER.log(Level.WARNING, "Can't find a classloader for the Driver; not loading driver "
+ + "configuration from org/postgresql/driverconfig.properties");
+ return merged; // Give up on finding defaults.
+ }
+
+ LOGGER.log(Level.FINE, "Loading driver configuration via classloader {0}", cl);
+
+ // When loading the driver config files we don't want settings found
+ // in later files in the classpath to override settings specified in
+ // earlier files. To do this we've got to read the returned
+ // Enumeration into temporary storage.
+ ArrayList<URL> urls = new ArrayList<>();
+ Enumeration<URL> urlEnum = cl.getResources("org/postgresql/driverconfig.properties");
+ while (urlEnum.hasMoreElements()) {
+ urls.add(urlEnum.nextElement());
+ }
+
+ for (int i = urls.size() - 1; i >= 0; i--) {
+ URL url = urls.get(i);
+ LOGGER.log(Level.FINE, "Loading driver configuration from: {0}", url);
+ InputStream is = url.openStream();
+ merged.load(is);
+ is.close();
+ }
+
+ return merged;
}
- return (long) DriverManager.getLoginTimeout() * 1000;
- }
- /**
- * This method was added in v6.5, and simply throws an SQLException for an unimplemented method. I
- * decided to do it this way while implementing the JDBC2 extensions to JDBC, as it should help
- * keep the overall driver size down. It now requires the call Class and the function name to help
- * when the driver is used with closed software that don't report the stack trace
- *
- * @param callClass the call Class
- * @param functionName the name of the unimplemented function with the type of its arguments
- * @return PSQLException with a localized message giving the complete description of the
- * unimplemented function
- */
- public static SQLFeatureNotSupportedException notImplemented(Class<?> callClass,
- String functionName) {
- return new SQLFeatureNotSupportedException(
- GT.tr("Method {0} is not yet implemented.", callClass.getName() + "." + functionName),
- PSQLState.NOT_IMPLEMENTED.getState());
- }
+ /**
+ * <p>Try to make a database connection to the given URL. The driver should return "null" if it
+ * realizes it is the wrong kind of driver to connect to the given URL. This will be common, as
+ * when the JDBC driverManager is asked to connect to a given URL, it passes the URL to each
+ * loaded driver in turn.
+ *
+ * <p>The driver should raise an SQLException if it is the right driver to connect to the given URL,
+ * but has trouble connecting to the database.
+ *
+ * <p>The java.util.Properties argument can be used to pass arbitrary string tag/value pairs as
+ * connection arguments.
+ *
+ * <ul>
+ * <li>user - (required) The user to connect as
+ * <li>password - (optional) The password for the user
+ * <li>ssl -(optional) Use SSL when connecting to the server
+ * <li>readOnly - (optional) Set connection to read-only by default
+ * <li>charSet - (optional) The character set to be used for converting to/from
+ * the database to unicode. If multibyte is enabled on the server then the character set of the
+ * database is used as the default, otherwise the jvm character encoding is used as the default.
+ * This value is only used when connecting to a 7.2 or older server.
+ * <li>loglevel - (optional) Enable logging of messages from the driver. The value is an integer
+ * from 0 to 2 where: OFF = 0, INFO =1, DEBUG = 2 The output is sent to
+ * DriverManager.getPrintWriter() if set, otherwise it is sent to System.out.
+ * <li>compatible - (optional) This is used to toggle between different functionality
+ * as it changes across different releases of the jdbc driver code. The values here are versions
+ * of the jdbc client and not server versions. For example in 7.1 get/setBytes worked on
+ * LargeObject values, in 7.2 these methods were changed to work on bytea values. This change in
+ * functionality could be disabled by setting the compatible level to be "7.1", in which case the
+ * driver will revert to the 7.1 functionality.
+ * </ul>
+ *
+ * <p>Normally, at least "user" and "password" properties should be included in the properties. For a
+ * list of supported character encoding , see
+ * http://java.sun.com/products/jdk/1.2/docs/guide/internat/encoding.doc.html Note that you will
+ * probably want to have set up the Postgres database itself to use the same encoding, with the
+ * {@code -E <encoding>} argument to createdb.
+ *
+ * @param url the URL of the database to connect to
+ * @param info a list of arbitrary tag/value pairs as connection arguments
+ * @return a connection to the URL or null if it isnt us
+ * @throws SQLException if a database access error occurs or the url is
+ * {@code null}
+ * @see java.sql.Driver#connect
+ */
+ @Override
+ public Connection connect(String url, Properties info) throws SQLException {
+ if (url == null) {
+ throw new SQLException("url is null");
+ }
+ // get defaults
+ Properties defaults;
- @Override
- public Logger getParentLogger() {
- return PARENT_LOGGER;
- }
+ if (!url.startsWith("jdbc:postgresql:")) {
+ return null;
+ }
+ try {
+ defaults = getDefaultProperties();
+ } catch (IOException ioe) {
+ throw new PSQLException(GT.tr("Error loading default settings from driverconfig.properties"),
+ PSQLState.UNEXPECTED_ERROR, ioe);
+ }
- public static SharedTimer getSharedTimer() {
- return SHARED_TIMER;
- }
+ // override defaults with provided properties
+ Properties props = new Properties(defaults);
+ if (info != null) {
+ Set<String> e = info.stringPropertyNames();
+ for (String propName : e) {
+ String propValue = info.getProperty(propName);
+ if (propValue == null) {
+ throw new PSQLException(
+ GT.tr("Properties for the driver contains a non-string value for the key ")
+ + propName,
+ PSQLState.UNEXPECTED_ERROR);
+ }
+ props.setProperty(propName, propValue);
+ }
+ }
+ // parse URL and add more properties
+ if ((props = parseURL(url, props)) == null) {
+ throw new PSQLException(
+ GT.tr("Unable to parse URL {0}", url),
+ PSQLState.UNEXPECTED_ERROR);
+ }
+ try {
- /**
- * Register the driver against {@link DriverManager}. This is done automatically when the class is
- * loaded. Dropping the driver from DriverManager's list is possible using {@link #deregister()}
- * method.
- *
- * @throws IllegalStateException if the driver is already registered
- * @throws SQLException if registering the driver fails
- */
- public static void register() throws SQLException {
- if (isRegistered()) {
- throw new IllegalStateException(
- "Driver is already registered. It can only be registered once.");
+ LOGGER.log(Level.FINE, "Connecting with URL: {0}", url);
+
+ // Enforce login timeout, if specified, by running the connection
+ // attempt in a separate thread. If we hit the timeout without the
+ // connection completing, we abandon the connection attempt in
+ // the calling thread, but the separate thread will keep trying.
+ // Eventually, the separate thread will either fail or complete
+ // the connection; at that point we clean up the connection if
+ // we managed to establish one after all. See ConnectThread for
+ // more details.
+ long timeout = timeout(props);
+ if (timeout <= 0) {
+ return makeConnection(url, props);
+ }
+
+ ConnectThread ct = new ConnectThread(url, props);
+ Thread thread = new Thread(ct, "PostgreSQL JDBC driver connection thread");
+ thread.setDaemon(true); // Don't prevent the VM from shutting down
+ thread.start();
+ return ct.getResult(timeout);
+ } catch (PSQLException ex1) {
+ LOGGER.log(Level.FINE, "Connection error: ", ex1);
+ // re-throw the exception, otherwise it will be caught next, and a
+ // org.postgresql.unusual error will be returned instead.
+ throw ex1;
+ } catch (Exception ex2) {
+ if ("java.security.AccessControlException".equals(ex2.getClass().getName())) {
+ // java.security.AccessControlException has been deprecated for removal, so compare the class name
+ throw new PSQLException(
+ GT.tr(
+ "Your security policy has prevented the connection from being attempted. You probably need to grant the connect java.net.SocketPermission to the database server host and port that you wish to connect to."),
+ PSQLState.UNEXPECTED_ERROR, ex2);
+ }
+ LOGGER.log(Level.FINE, "Unexpected connection error: ", ex2);
+ throw new PSQLException(
+ GT.tr(
+ "Something unusual has occurred to cause the driver to fail. Please report this exception."),
+ PSQLState.UNEXPECTED_ERROR, ex2);
+ }
}
- Driver registeredDriver = new Driver();
- DriverManager.registerDriver(registeredDriver);
- Driver.registeredDriver = registeredDriver;
- }
- /**
- * According to JDBC specification, this driver is registered against {@link DriverManager} when
- * the class is loaded. To avoid leaks, this method allow unregistering the driver so that the
- * class can be gc'ed if necessary.
- *
- * @throws IllegalStateException if the driver is not registered
- * @throws SQLException if deregistering the driver fails
- */
- public static void deregister() throws SQLException {
- if (registeredDriver == null) {
- throw new IllegalStateException(
- "Driver is not registered (or it has not been registered using Driver.register() method)");
+ /**
+ * this is an empty method left here for graalvm
+ * we removed the ability to setup the logger from properties
+ * due to a security issue
+ *
+ * @param props Connection Properties
+ */
+ private void setupLoggerFromProperties(final Properties props) {
}
- DriverManager.deregisterDriver(registeredDriver);
- registeredDriver = null;
- }
- /**
- * @return {@code true} if the driver is registered against {@link DriverManager}
- */
- public static boolean isRegistered() {
- return registeredDriver != null;
- }
+ /**
+ * Returns true if the driver thinks it can open a connection to the given URL. Typically, drivers
+ * will return true if they understand the subprotocol specified in the URL and false if they
+ * don't. Our protocols start with jdbc:postgresql:
+ *
+ * @param url the URL of the driver
+ * @return true if this driver accepts the given URL
+ * @see java.sql.Driver#acceptsURL
+ */
+ @Override
+ public boolean acceptsURL(String url) {
+ return parseURL(url, null) != null;
+ }
+
+ /**
+ * <p>The getPropertyInfo method is intended to allow a generic GUI tool to discover what properties
+ * it should prompt a human for in order to get enough information to connect to a database.
+ *
+ * <p>Note that depending on the values the human has supplied so far, additional values may become
+ * necessary, so it may be necessary to iterate through several calls to getPropertyInfo
+ *
+ * @param url the Url of the database to connect to
+ * @param info a proposed list of tag/value pairs that will be sent on connect open.
+ * @return An array of DriverPropertyInfo objects describing possible properties. This array may
+ * be an empty array if no properties are required
+ * @see java.sql.Driver#getPropertyInfo
+ */
+ @Override
+ public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) {
+ Properties copy = new Properties(info);
+ Properties parse = parseURL(url, copy);
+ if (parse != null) {
+ copy = parse;
+ }
+
+ PGProperty[] knownProperties = PGProperty.values();
+ DriverPropertyInfo[] props = new DriverPropertyInfo[knownProperties.length];
+ for (int i = 0; i < props.length; i++) {
+ props[i] = knownProperties[i].toDriverPropertyInfo(copy);
+ }
+
+ return props;
+ }
+
+ @Override
+ public int getMajorVersion() {
+ return DriverInfo.MAJOR_VERSION;
+ }
+
+ @Override
+ public int getMinorVersion() {
+ return DriverInfo.MINOR_VERSION;
+ }
+
+ /**
+ * <p>Report whether the driver is a genuine JDBC compliant driver. A driver may only report "true"
+ * here if it passes the JDBC compliance tests, otherwise it is required to return false. JDBC
+ * compliance requires full support for the JDBC API and full support for SQL 92 Entry Level.
+ *
+ * <p>For PostgreSQL, this is not yet possible, as we are not SQL92 compliant (yet).
+ */
+ @Override
+ public boolean jdbcCompliant() {
+ return false;
+ }
+
+ @Override
+ public Logger getParentLogger() {
+ return PARENT_LOGGER;
+ }
+
+ /**
+ * Perform a connect in a separate thread; supports getting the results from the original thread
+ * while enforcing a login timeout.
+ */
+ private static class ConnectThread implements Runnable {
+ private final ResourceLock lock = new ResourceLock();
+ private final Condition lockCondition = lock.newCondition();
+ private final String url;
+ private final Properties props;
+ private Connection result;
+ private Throwable resultException;
+ private boolean abandoned;
+ ConnectThread(String url, Properties props) {
+ this.url = url;
+ this.props = props;
+ }
+
+ @Override
+ public void run() {
+ Connection conn;
+ Throwable error;
+
+ try {
+ conn = makeConnection(url, props);
+ error = null;
+ } catch (Throwable t) {
+ conn = null;
+ error = t;
+ }
+
+ try (ResourceLock ignore = lock.obtain()) {
+ if (abandoned) {
+ if (conn != null) {
+ try {
+ conn.close();
+ } catch (SQLException e) {
+ }
+ }
+ } else {
+ result = conn;
+ resultException = error;
+ lockCondition.signal();
+ }
+ }
+ }
+
+ /**
+ * Get the connection result from this (assumed running) thread. If the timeout is reached
+ * without a result being available, a SQLException is thrown.
+ *
+ * @param timeout timeout in milliseconds
+ * @return the new connection, if successful
+ * @throws SQLException if a connection error occurs or the timeout is reached
+ */
+ public Connection getResult(long timeout) throws SQLException {
+ long expiry = TimeUnit.NANOSECONDS.toMillis(System.nanoTime()) + timeout;
+ try (ResourceLock ignore = lock.obtain()) {
+ while (true) {
+ if (result != null) {
+ return result;
+ }
+
+ Throwable resultException = this.resultException;
+ if (resultException != null) {
+ if (resultException instanceof SQLException) {
+ resultException.fillInStackTrace();
+ throw (SQLException) resultException;
+ } else {
+ throw new PSQLException(
+ GT.tr(
+ "Something unusual has occurred to cause the driver to fail. Please report this exception."),
+ PSQLState.UNEXPECTED_ERROR, resultException);
+ }
+ }
+
+ long delay = expiry - TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
+ if (delay <= 0) {
+ abandoned = true;
+ throw new PSQLException(GT.tr("Connection attempt timed out."),
+ PSQLState.CONNECTION_UNABLE_TO_CONNECT);
+ }
+
+ try {
+ lockCondition.await(delay, TimeUnit.MILLISECONDS);
+ } catch (InterruptedException ie) {
+
+ // reset the interrupt flag
+ Thread.currentThread().interrupt();
+ abandoned = true;
+
+ // throw an unchecked exception which will hopefully not be ignored by the calling code
+ throw new RuntimeException(GT.tr("Interrupted while attempting to connect."));
+ }
+ }
+ }
+ }
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/PGConnection.java b/pgjdbc/src/main/java/org/postgresql/PGConnection.java
index b0b438c..124cac7 100644
--- a/pgjdbc/src/main/java/org/postgresql/PGConnection.java
+++ b/pgjdbc/src/main/java/org/postgresql/PGConnection.java
@@ -32,350 +32,349 @@ import java.util.Map;
*/
public interface PGConnection {
- /**
- * Creates an {@link Array} wrapping elements. This is similar to
- * {@link java.sql.Connection#createArrayOf(String, Object[])}, but also
- * provides support for primitive arrays.
- *
- * @param typeName
- * The SQL name of the type to map the elements to.
- * Must not be {@code null}.
- * @param elements
- * The array of objects to map. A {@code null} value will result in
- * an {@link Array} representing {@code null}.
- * @return An {@link Array} wrapping elements.
- * @throws SQLException
- * If for some reason the array cannot be created.
- * @see java.sql.Connection#createArrayOf(String, Object[])
- */
- Array createArrayOf(String typeName, Object elements) throws SQLException;
+ /**
+ * Creates an {@link Array} wrapping elements. This is similar to
+ * {@link java.sql.Connection#createArrayOf(String, Object[])}, but also
+ * provides support for primitive arrays.
+ *
+ * @param typeName The SQL name of the type to map the elements to.
+ * Must not be {@code null}.
+ * @param elements The array of objects to map. A {@code null} value will result in
+ * an {@link Array} representing {@code null}.
+ * @return An {@link Array} wrapping elements.
+ * @throws SQLException If for some reason the array cannot be created.
+ * @see java.sql.Connection#createArrayOf(String, Object[])
+ */
+ Array createArrayOf(String typeName, Object elements) throws SQLException;
- /**
- * This method returns any notifications that have been received since the last call to this
- * method. Returns null if there have been no notifications.
- *
- * @return notifications that have been received
- * @throws SQLException if something wrong happens
- * @since 7.3
- */
- PGNotification[] getNotifications() throws SQLException;
+ /**
+ * This method returns any notifications that have been received since the last call to this
+ * method. Returns null if there have been no notifications.
+ *
+ * @return notifications that have been received
+ * @throws SQLException if something wrong happens
+ * @since 7.3
+ */
+ PGNotification[] getNotifications() throws SQLException;
- /**
- * This method returns any notifications that have been received since the last call to this
- * method. Returns null if there have been no notifications. A timeout can be specified so the
- * driver waits for notifications.
- *
- * @param timeoutMillis when 0, blocks forever. when > 0, blocks up to the specified number of millis
- * or until at least one notification has been received. If more than one notification is
- * about to be received, these will be returned in one batch.
- * @return notifications that have been received
- * @throws SQLException if something wrong happens
- * @since 43
- */
- PGNotification[] getNotifications(int timeoutMillis) throws SQLException;
+ /**
+ * This method returns any notifications that have been received since the last call to this
+ * method. Returns null if there have been no notifications. A timeout can be specified so the
+ * driver waits for notifications.
+ *
+ * @param timeoutMillis when 0, blocks forever. when > 0, blocks up to the specified number of millis
+ * or until at least one notification has been received. If more than one notification is
+ * about to be received, these will be returned in one batch.
+ * @return notifications that have been received
+ * @throws SQLException if something wrong happens
+ * @since 43
+ */
+ PGNotification[] getNotifications(int timeoutMillis) throws SQLException;
- /**
- * This returns the COPY API for the current connection.
- *
- * @return COPY API for the current connection
- * @throws SQLException if something wrong happens
- * @since 8.4
- */
- CopyManager getCopyAPI() throws SQLException;
+ /**
+ * This returns the COPY API for the current connection.
+ *
+ * @return COPY API for the current connection
+ * @throws SQLException if something wrong happens
+ * @since 8.4
+ */
+ CopyManager getCopyAPI() throws SQLException;
- /**
- * This returns the LargeObject API for the current connection.
- *
- * @return LargeObject API for the current connection
- * @throws SQLException if something wrong happens
- * @since 7.3
- */
- LargeObjectManager getLargeObjectAPI() throws SQLException;
+ /**
+ * This returns the LargeObject API for the current connection.
+ *
+ * @return LargeObject API for the current connection
+ * @throws SQLException if something wrong happens
+ * @since 7.3
+ */
+ LargeObjectManager getLargeObjectAPI() throws SQLException;
- /**
- * This returns the Fastpath API for the current connection.
- *
- * @return Fastpath API for the current connection
- * @throws SQLException if something wrong happens
- * @since 7.3
- * @deprecated This API is somewhat obsolete, as one may achieve similar performance
- * and greater functionality by setting up a prepared statement to define
- * the function call. Then, executing the statement with binary transmission of parameters
- * and results substitutes for a fast-path function call.
- */
- @Deprecated
- Fastpath getFastpathAPI() throws SQLException;
+ /**
+ * This returns the Fastpath API for the current connection.
+ *
+ * @return Fastpath API for the current connection
+ * @throws SQLException if something wrong happens
+ * @since 7.3
+ * @deprecated This API is somewhat obsolete, as one may achieve similar performance
+ * and greater functionality by setting up a prepared statement to define
+ * the function call. Then, executing the statement with binary transmission of parameters
+ * and results substitutes for a fast-path function call.
+ */
+ @Deprecated
+ Fastpath getFastpathAPI() throws SQLException;
- /**
- * This allows client code to add a handler for one of org.postgresql's more unique data types. It
- * is approximately equivalent to addDataType(type, Class.forName(name)).
- *
- * @param type JDBC type name
- * @param className class name
- * @throws RuntimeException if the type cannot be registered (class not found, etc).
- * @deprecated As of 8.0, replaced by {@link #addDataType(String, Class)}. This deprecated method
- * does not work correctly for registering classes that cannot be directly loaded by
- * the JDBC driver's classloader.
- */
- @Deprecated
- void addDataType(String type, String className);
+ /**
+ * This allows client code to add a handler for one of org.postgresql's more unique data types. It
+ * is approximately equivalent to addDataType(type, Class.forName(name)).
+ *
+ * @param type JDBC type name
+ * @param className class name
+ * @throws RuntimeException if the type cannot be registered (class not found, etc).
+ * @deprecated As of 8.0, replaced by {@link #addDataType(String, Class)}. This deprecated method
+ * does not work correctly for registering classes that cannot be directly loaded by
+ * the JDBC driver's classloader.
+ */
+ @Deprecated
+ void addDataType(String type, String className);
- /**
- * <p>This allows client code to add a handler for one of org.postgresql's more unique data types.
- * where myconn is an open Connection to org.postgresql.</p>
- *
- * <p>The handling class must extend org.postgresql.util.PGobject</p>
- *
- * @param type the PostgreSQL type to register
- * @param klass the class implementing the Java representation of the type; this class must
- * implement {@link org.postgresql.util.PGobject}).
- * @throws SQLException if klass does not implement
- * {@link org.postgresql.util.PGobject}).
- * @see org.postgresql.util.PGobject
- * @since 8.0
- */
- void addDataType(String type, Class<? extends PGobject> klass) throws SQLException;
+ /**
+ * <p>This allows client code to add a handler for one of org.postgresql's more unique data types.
+ * where myconn is an open Connection to org.postgresql.</p>
+ *
+ * <p>The handling class must extend org.postgresql.util.PGobject</p>
+ *
+ * @param type the PostgreSQL type to register
+ * @param klass the class implementing the Java representation of the type; this class must
+ * implement {@link org.postgresql.util.PGobject}).
+ * @throws SQLException if klass does not implement
+ * {@link org.postgresql.util.PGobject}).
+ * @see org.postgresql.util.PGobject
+ * @since 8.0
+ */
+ void addDataType(String type, Class<? extends PGobject> klass) throws SQLException;
- /**
- * Set the default statement reuse threshold before enabling server-side prepare. See
- * {@link org.postgresql.PGStatement#setPrepareThreshold(int)} for details.
- *
- * @param threshold the new threshold
- * @since build 302
- */
- void setPrepareThreshold(int threshold);
+ /**
+ * Get the default server-side prepare reuse threshold for statements created from this
+ * connection.
+ *
+ * @return the current threshold
+ * @since build 302
+ */
+ int getPrepareThreshold();
- /**
- * Get the default server-side prepare reuse threshold for statements created from this
- * connection.
- *
- * @return the current threshold
- * @since build 302
- */
- int getPrepareThreshold();
+ /**
+ * Set the default statement reuse threshold before enabling server-side prepare. See
+ * {@link org.postgresql.PGStatement#setPrepareThreshold(int)} for details.
+ *
+ * @param threshold the new threshold
+ * @since build 302
+ */
+ void setPrepareThreshold(int threshold);
- /**
- * Set the default fetch size for statements created from this connection.
- *
- * @param fetchSize new default fetch size
- * @throws SQLException if specified negative fetchSize parameter
- * @see Statement#setFetchSize(int)
- */
- void setDefaultFetchSize(int fetchSize) throws SQLException;
+ /**
+ * Get the default fetch size for statements created from this connection.
+ *
+ * @return current state for default fetch size
+ * @see PGProperty#DEFAULT_ROW_FETCH_SIZE
+ * @see Statement#getFetchSize()
+ */
+ int getDefaultFetchSize();
- /**
- * Get the default fetch size for statements created from this connection.
- *
- * @return current state for default fetch size
- * @see PGProperty#DEFAULT_ROW_FETCH_SIZE
- * @see Statement#getFetchSize()
- */
- int getDefaultFetchSize();
+ /**
+ * Set the default fetch size for statements created from this connection.
+ *
+ * @param fetchSize new default fetch size
+ * @throws SQLException if specified negative fetchSize parameter
+ * @see Statement#setFetchSize(int)
+ */
+ void setDefaultFetchSize(int fetchSize) throws SQLException;
- /**
- * Return the process ID (PID) of the backend server process handling this connection.
- *
- * @return PID of backend server process.
- */
- int getBackendPID();
+ /**
+ * Return the process ID (PID) of the backend server process handling this connection.
+ *
+ * @return PID of backend server process.
+ */
+ int getBackendPID();
- /**
- * Sends a query cancellation for this connection.
- * @throws SQLException if there are problems cancelling the query
- */
- void cancelQuery() throws SQLException;
+ /**
+ * Sends a query cancellation for this connection.
+ *
+ * @throws SQLException if there are problems cancelling the query
+ */
+ void cancelQuery() throws SQLException;
- /**
- * Return the given string suitably quoted to be used as an identifier in an SQL statement string.
- * Quotes are added only if necessary (i.e., if the string contains non-identifier characters or
- * would be case-folded). Embedded quotes are properly doubled.
- *
- * @param identifier input identifier
- * @return the escaped identifier
- * @throws SQLException if something goes wrong
- */
- String escapeIdentifier(String identifier) throws SQLException;
+ /**
+ * Return the given string suitably quoted to be used as an identifier in an SQL statement string.
+ * Quotes are added only if necessary (i.e., if the string contains non-identifier characters or
+ * would be case-folded). Embedded quotes are properly doubled.
+ *
+ * @param identifier input identifier
+ * @return the escaped identifier
+ * @throws SQLException if something goes wrong
+ */
+ String escapeIdentifier(String identifier) throws SQLException;
- /**
- * Return the given string suitably quoted to be used as a string literal in an SQL statement
- * string. Embedded single-quotes and backslashes are properly doubled. Note that quote_literal
- * returns null on null input.
- *
- * @param literal input literal
- * @return the quoted literal
- * @throws SQLException if something goes wrong
- */
- String escapeLiteral(String literal) throws SQLException;
+ /**
+ * Return the given string suitably quoted to be used as a string literal in an SQL statement
+ * string. Embedded single-quotes and backslashes are properly doubled. Note that quote_literal
+ * returns null on null input.
+ *
+ * @param literal input literal
+ * @return the quoted literal
+ * @throws SQLException if something goes wrong
+ */
+ String escapeLiteral(String literal) throws SQLException;
- /**
- * <p>Returns the query mode for this connection.</p>
- *
- * <p>When running in simple query mode, certain features are not available: callable statements,
- * partial result set fetch, bytea type, etc.
- * The list of supported features is subject to change.</p>
- * If the specific encryption type is not specified, this method defaults to querying the database server for the server's default password_encryption.
- * This method does not send the new password in plain text to the server.
- * Instead, it encrypts the password locally and sends the encoded hash so that the plain text password is never sent on the wire.
- *
- *
- *
- * Acceptable values for encryptionType are null, "md5", or "scram-sha-256".
- * Users should avoid "md5" unless they are explicitly targeting an older server that does not support the more secure SCRAM.
- *
- *
- * @param user The username of the database user
- * @param newPassword The new password for the database user. The implementation will zero
- * out the array after use
- * @param encryptionType The type of password encryption to use or null if the database server default should be used.
- * @throws SQLException If the password could not be altered
- */
- default void alterUserPassword(String user, char[] newPassword, String encryptionType) throws SQLException {
- try (Statement stmt = ((Connection) this).createStatement()) {
- if (encryptionType == null) {
- try (ResultSet rs = stmt.executeQuery("SHOW password_encryption")) {
- if (!rs.next()) {
- throw new PSQLException(GT.tr("Expected a row when reading password_encryption but none was found"),
- PSQLState.NO_DATA);
- }
- encryptionType = rs.getString(1);
- if (encryptionType == null) {
- throw new PSQLException(GT.tr("SHOW password_encryption returned null value"),
- PSQLState.NO_DATA);
- }
+ /**
+ * Change a user's password to the specified new password.
+ *
+ *
+ * If the specific encryption type is not specified, this method defaults to querying the database server for the server's default password_encryption.
+ * This method does not send the new password in plain text to the server.
+ * Instead, it encrypts the password locally and sends the encoded hash so that the plain text password is never sent on the wire.
+ *
+ *
+ *
+ * Acceptable values for encryptionType are null, "md5", or "scram-sha-256".
+ * Users should avoid "md5" unless they are explicitly targeting an older server that does not support the more secure SCRAM.
+ *
+ *
+ * @param user The username of the database user
+ * @param newPassword The new password for the database user. The implementation will zero
+ * out the array after use
+ * @param encryptionType The type of password encryption to use or null if the database server default should be used.
+ * @throws SQLException If the password could not be altered
+ */
+ default void alterUserPassword(String user, char[] newPassword, String encryptionType) throws SQLException {
+ try (Statement stmt = ((Connection) this).createStatement()) {
+ if (encryptionType == null) {
+ try (ResultSet rs = stmt.executeQuery("SHOW password_encryption")) {
+ if (!rs.next()) {
+ throw new PSQLException(GT.tr("Expected a row when reading password_encryption but none was found"),
+ PSQLState.NO_DATA);
+ }
+ encryptionType = rs.getString(1);
+ if (encryptionType == null) {
+ throw new PSQLException(GT.tr("SHOW password_encryption returned null value"),
+ PSQLState.NO_DATA);
+ }
+ }
+ }
+ String sql = PasswordUtil.genAlterUserPasswordSQL(user, newPassword, encryptionType);
+ stmt.execute(sql);
+ } finally {
+ Arrays.fill(newPassword, (char) 0);
}
- }
- String sql = PasswordUtil.genAlterUserPasswordSQL(user, newPassword, encryptionType);
- stmt.execute(sql);
- } finally {
- Arrays.fill(newPassword, (char) 0);
}
- }
- /**
- * <p>Returns the current values of all parameters reported by the server.</p>
- *
- * <p>PostgreSQL reports values for a subset of parameters (GUCs) to the client
- * at connect-time, then sends update messages whenever the values change
- * during a session. PgJDBC records the latest values and exposes it to client
- * applications via getParameterStatuses().</p>
- *
- * <p>PgJDBC exposes individual accessors for some of these parameters as
- * listed below. They are more backwards-compatible and should be preferred
- * where possible.</p>
- *
- * <p>Not all parameters are reported, only those marked
- * GUC_REPORT in the source code. The pg_settings
- * view does not expose information about which parameters are reportable.
- * PgJDBC's map will only contain the parameters the server reports values
- * for, so you cannot use this method as a substitute for running a
- * SHOW paramname; or SELECT
- * current_setting('paramname'); query for arbitrary parameters.</p>
- *
- * <p>Parameter names are case-insensitive and case-preserving
- * in this map, like in PostgreSQL itself. So DateStyle and
- * datestyle are the same key.</p>
- *
- * <p>
- * As of PostgreSQL 11 the reportable parameter list, and related PgJDBC
- * interfaces or assessors, are:
- * </p>
+ /**
+ * <p>Returns the current values of all parameters reported by the server.</p>
+ *
+ * <p>PostgreSQL reports values for a subset of parameters (GUCs) to the client
+ * at connect-time, then sends update messages whenever the values change
+ * during a session. PgJDBC records the latest values and exposes it to client
+ * applications via getParameterStatuses().</p>
+ *
+ * <p>PgJDBC exposes individual accessors for some of these parameters as
+ * listed below. They are more backwards-compatible and should be preferred
+ * where possible.</p>
+ *
+ * <p>Not all parameters are reported, only those marked
+ * GUC_REPORT in the source code. The pg_settings
+ * view does not expose information about which parameters are reportable.
+ * PgJDBC's map will only contain the parameters the server reports values
+ * for, so you cannot use this method as a substitute for running a
+ * SHOW paramname; or SELECT
+ * current_setting('paramname'); query for arbitrary parameters.</p>
+ *
+ * <p>Parameter names are case-insensitive and case-preserving
+ * in this map, like in PostgreSQL itself. So DateStyle and
+ * datestyle are the same key.</p>
+ *
+ * <p>
+ * As of PostgreSQL 11 the reportable parameter list, and related PgJDBC
+ * interfaces or assessors, are:
+ * </p>
+ * <ul>
+ * <li>client_encoding - PgJDBC always sets this to UTF8.
+ * See allowEncodingChanges connection property.</li>
+ * <li>DateStyle - PgJDBC requires this to always be set to ISO</li>
+ * <li>standard_conforming_strings - indirectly via {@link #escapeLiteral(String)}</li>
+ * <li>TimeZone - set from JDK timezone see {@link java.util.TimeZone#getDefault()}
+ * and {@link java.util.TimeZone#setDefault(TimeZone)}</li>
+ * <li>integer_datetimes</li>
+ * <li>IntervalStyle</li>
+ * <li>server_encoding</li>
+ * <li>server_version</li>
+ * <li>is_superuser</li>
+ * <li>session_authorization</li>
+ * </ul>
+ *
+ * <p>Note that some PgJDBC operations will change server parameters
+ * automatically.</p>
+ *
+ * @return unmodifiable map of case-insensitive parameter names to parameter values
+ * @since 42.2.6
+ */
+ Map<String, String> getParameterStatuses();
- /**
- * Shorthand for getParameterStatuses().get(...) .
- *
- * @param parameterName case-insensitive parameter name
- * @return parameter value if defined, or null if no parameter known
- * @see #getParameterStatuses
- * @since 42.2.6
- */
- String getParameterStatus(String parameterName);
+ /**
+ * Shorthand for getParameterStatuses().get(...) .
+ *
+ * @param parameterName case-insensitive parameter name
+ * @return parameter value if defined, or null if no parameter known
+ * @see #getParameterStatuses
+ * @since 42.2.6
+ */
+ String getParameterStatus(String parameterName);
- /**
- * Turn on/off adaptive fetch for connection. Existing statements and resultSets won't be affected
- * by change here.
- *
- * @param adaptiveFetch desired state of adaptive fetch.
- */
- void setAdaptiveFetch(boolean adaptiveFetch);
+ /**
+ * Get state of adaptive fetch for connection.
+ *
+ * @return state of adaptive fetch (turned on or off)
+ */
+ boolean getAdaptiveFetch();
- /**
- * Get state of adaptive fetch for connection.
- *
- * @return state of adaptive fetch (turned on or off)
- */
- boolean getAdaptiveFetch();
+ /**
+ * Turn on/off adaptive fetch for connection. Existing statements and resultSets won't be affected
+ * by change here.
+ *
+ * @param adaptiveFetch desired state of adaptive fetch.
+ */
+ void setAdaptiveFetch(boolean adaptiveFetch);
}
diff --git a/pgjdbc/src/main/java/org/postgresql/PGEnvironment.java b/pgjdbc/src/main/java/org/postgresql/PGEnvironment.java
index ac4e611..ae063cf 100644
--- a/pgjdbc/src/main/java/org/postgresql/PGEnvironment.java
+++ b/pgjdbc/src/main/java/org/postgresql/PGEnvironment.java
@@ -14,95 +14,95 @@ import java.util.Map;
*/
public enum PGEnvironment {
- /**
- * Specified location of password file.
- */
- ORG_POSTGRESQL_PGPASSFILE(
- "org.postgresql.pgpassfile",
- null,
- "Specified location of password file."),
+ /**
+ * Specified location of password file.
+ */
+ ORG_POSTGRESQL_PGPASSFILE(
+ "org.postgresql.pgpassfile",
+ null,
+ "Specified location of password file."),
- /**
- * Specified location of password file.
- */
- PGPASSFILE(
- "PGPASSFILE",
- "pgpass",
- "Specified location of password file."),
+ /**
+ * Specified location of password file.
+ */
+ PGPASSFILE(
+ "PGPASSFILE",
+ "pgpass",
+ "Specified location of password file."),
- /**
- * The connection service resource (file, url) allows connection parameters to be associated
- * with a single service name.
- */
- ORG_POSTGRESQL_PGSERVICEFILE(
- "org.postgresql.pgservicefile",
- null,
- "Specifies the service resource to resolve connection properties."),
+ /**
+ * The connection service resource (file, url) allows connection parameters to be associated
+ * with a single service name.
+ */
+ ORG_POSTGRESQL_PGSERVICEFILE(
+ "org.postgresql.pgservicefile",
+ null,
+ "Specifies the service resource to resolve connection properties."),
- /**
- * The connection service resource (file, url) allows connection parameters to be associated
- * with a single service name.
- */
- PGSERVICEFILE(
- "PGSERVICEFILE",
- "pg_service.conf",
- "Specifies the service resource to resolve connection properties."),
+ /**
+ * The connection service resource (file, url) allows connection parameters to be associated
+ * with a single service name.
+ */
+ PGSERVICEFILE(
+ "PGSERVICEFILE",
+ "pg_service.conf",
+ "Specifies the service resource to resolve connection properties."),
- /**
- * sets the directory containing the PGSERVICEFILE file and possibly other system-wide
- * configuration files.
- */
- PGSYSCONFDIR(
- "PGSYSCONFDIR",
- null,
- "Specifies the directory containing the PGSERVICEFILE file"),
- ;
+ /**
+ * sets the directory containing the PGSERVICEFILE file and possibly other system-wide
+ * configuration files.
+ */
+ PGSYSCONFDIR(
+ "PGSYSCONFDIR",
+ null,
+ "Specifies the directory containing the PGSERVICEFILE file"),
+ ;
- private final String name;
- private final String defaultValue;
- private final String description;
+ private static final Map<String, PGEnvironment> PROPS_BY_NAME = new HashMap<>();
- PGEnvironment(String name, String defaultValue, String description) {
- this.name = name;
- this.defaultValue = defaultValue;
- this.description = description;
- }
-
- private static final Map<String, PGEnvironment> PROPS_BY_NAME = new HashMap<>();
-
- static {
- for (PGEnvironment prop : PGEnvironment.values()) {
- if (PROPS_BY_NAME.put(prop.getName(), prop) != null) {
- throw new IllegalStateException("Duplicate PGProperty name: " + prop.getName());
- }
+ static {
+ for (PGEnvironment prop : PGEnvironment.values()) {
+ if (PROPS_BY_NAME.put(prop.getName(), prop) != null) {
+ throw new IllegalStateException("Duplicate PGProperty name: " + prop.getName());
+ }
+ }
}
- }
- /**
- * Returns the name of the parameter.
- *
- * @return the name of the parameter
- */
- public String getName() {
- return name;
- }
+ private final String name;
+ private final String defaultValue;
+ private final String description;
- /**
- * Returns the default value for this parameter.
- *
- * @return the default value for this parameter or null
- */
- public String getDefaultValue() {
- return defaultValue;
- }
+ PGEnvironment(String name, String defaultValue, String description) {
+ this.name = name;
+ this.defaultValue = defaultValue;
+ this.description = description;
+ }
- /**
- * Returns the description for this parameter.
- *
- * @return the description for this parameter
- */
- public String getDescription() {
- return description;
- }
+ /**
+ * Returns the name of the parameter.
+ *
+ * @return the name of the parameter
+ */
+ public String getName() {
+ return name;
+ }
+
+ /**
+ * Returns the default value for this parameter.
+ *
+ * @return the default value for this parameter or null
+ */
+ public String getDefaultValue() {
+ return defaultValue;
+ }
+
+ /**
+ * Returns the description for this parameter.
+ *
+ * @return the description for this parameter
+ */
+ public String getDescription() {
+ return description;
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/PGNotification.java b/pgjdbc/src/main/java/org/postgresql/PGNotification.java
index 03c8bb8..322a129 100644
--- a/pgjdbc/src/main/java/org/postgresql/PGNotification.java
+++ b/pgjdbc/src/main/java/org/postgresql/PGNotification.java
@@ -9,29 +9,29 @@ package org.postgresql;
* This interface defines the public PostgreSQL extension for Notifications.
*/
public interface PGNotification {
- /**
- * Returns name of this notification.
- *
- * @return name of this notification
- * @since 7.3
- */
- String getName();
+ /**
+ * Returns name of this notification.
+ *
+ * @return name of this notification
+ * @since 7.3
+ */
+ String getName();
- /**
- * Returns the process id of the backend process making this notification.
- *
- * @return process id of the backend process making this notification
- * @since 7.3
- */
- int getPID();
+ /**
+ * Returns the process id of the backend process making this notification.
+ *
+ * @return process id of the backend process making this notification
+ * @since 7.3
+ */
+ int getPID();
- /**
- * Returns additional information from the notifying process. This feature has only been
- * implemented in server versions 9.0 and later, so previous versions will always return an empty
- * String.
- *
- * @return additional information from the notifying process
- * @since 8.0
- */
- String getParameter();
+ /**
+ * Returns additional information from the notifying process. This feature has only been
+ * implemented in server versions 9.0 and later, so previous versions will always return an empty
+ * String.
+ *
+ * @return additional information from the notifying process
+ * @since 8.0
+ */
+ String getParameter();
}
diff --git a/pgjdbc/src/main/java/org/postgresql/PGProperty.java b/pgjdbc/src/main/java/org/postgresql/PGProperty.java
index 571ed36..81e6123 100644
--- a/pgjdbc/src/main/java/org/postgresql/PGProperty.java
+++ b/pgjdbc/src/main/java/org/postgresql/PGProperty.java
@@ -22,1010 +22,1015 @@ import java.util.Properties;
*/
public enum PGProperty {
- /**
- * Specifies if number of rows, used during fetching rows of a result set, should be computed
- * dynamically. Number of rows will be calculated by dividing maxResultBuffer size by max row size
- * observed so far, rounded down. First fetch will have number of rows declared in
- * defaultRowFetchSize. Number of rows can be limited by adaptiveFetchMinimum and
- * adaptiveFetchMaximum. Requires declaring of maxResultBuffer and defaultRowFetchSize to work.
- * Default value is false.
- */
- ADAPTIVE_FETCH(
- "adaptiveFetch",
- "false",
- "Specifies if number of rows fetched in ResultSet should be adaptive to maxResultBuffer and max row size."),
-
- /**
- * Specifies the highest number of rows which can be calculated by adaptiveFetch. Requires
- * adaptiveFetch set to true to work. Default value is -1 (used as infinity).
- */
- ADAPTIVE_FETCH_MAXIMUM(
- "adaptiveFetchMaximum",
- "-1",
- "Specifies maximum number of rows used by adaptive fetch."),
-
- /**
- * Specifies the lowest number of rows which can be calculated by adaptiveFetch. Requires
- * adaptiveFetch set to true to work. Default value is 0.
- */
- ADAPTIVE_FETCH_MINIMUM(
- "adaptiveFetchMinimum",
- "0",
- "Specifies minimum number of rows used by adaptive fetch."),
-
- /**
- * When using the V3 protocol the driver monitors changes in certain server configuration
- * parameters that should not be touched by end users. The {@code client_encoding} setting is set
- * by the driver and should not be altered. If the driver detects a change it will abort the
- * connection.
- */
- ALLOW_ENCODING_CHANGES(
- "allowEncodingChanges",
- "false",
- "Allow for changes in client_encoding"),
-
- /**
- * The application name (require server version >= 9.0).
- */
- APPLICATION_NAME(
- "ApplicationName",
- DriverInfo.DRIVER_NAME,
- "Name of the Application (backend >= 9.0)"),
-
- /**
- * Assume the server is at least that version.
- */
- ASSUME_MIN_SERVER_VERSION(
- "assumeMinServerVersion",
- null,
- "Assume the server is at least that version"),
-
- /**
- * AuthenticationPluginClass
- */
-
- AUTHENTICATION_PLUGIN_CLASS_NAME(
- "authenticationPluginClassName",
- null,
- "Name of class which implements AuthenticationPlugin"
- ),
-
- /**
- * Specifies what the driver should do if a query fails. In {@code autosave=always} mode, JDBC driver sets a savepoint before each query,
- * and rolls back to that savepoint in case of failure. In {@code autosave=never} mode (default), no savepoint dance is made ever.
- * In {@code autosave=conservative} mode, savepoint is set for each query, however the rollback is done only for rare cases
- * like 'cached statement cannot change return type' or 'statement XXX is not valid' so JDBC driver rollsback and retries
- */
- AUTOSAVE(
- "autosave",
- "never",
- "Specifies what the driver should do if a query fails. In autosave=always mode, JDBC driver sets a savepoint before each query, "
- + "and rolls back to that savepoint in case of failure. In autosave=never mode (default), no savepoint dance is made ever. "
- + "In autosave=conservative mode, safepoint is set for each query, however the rollback is done only for rare cases"
- + " like 'cached statement cannot change return type' or 'statement XXX is not valid' so JDBC driver rollsback and retries",
- false,
- new String[]{"always", "never", "conservative"}),
-
- /**
- * Use binary format for sending and receiving data if possible.
- */
- BINARY_TRANSFER(
- "binaryTransfer",
- "true",
- "Use binary format for sending and receiving data if possible"),
-
- /**
- * Comma separated list of types to disable binary transfer. Either OID numbers or names.
- * Overrides values in the driver default set and values set with binaryTransferEnable.
- */
- BINARY_TRANSFER_DISABLE(
- "binaryTransferDisable",
- "",
- "Comma separated list of types to disable binary transfer. Either OID numbers or names. Overrides values in the driver default set and values set with binaryTransferEnable."),
-
- /**
- * Comma separated list of types to enable binary transfer. Either OID numbers or names
- */
- BINARY_TRANSFER_ENABLE(
- "binaryTransferEnable",
- "",
- "Comma separated list of types to enable binary transfer. Either OID numbers or names"),
-
- /**
- * Cancel command is sent out of band over its own connection, so cancel message can itself get
- * stuck.
- * This property controls "connect timeout" and "socket timeout" used for cancel commands.
- * The timeout is specified in seconds. Default value is 10 seconds.
- */
- CANCEL_SIGNAL_TIMEOUT(
- "cancelSignalTimeout",
- "10",
- "The timeout that is used for sending cancel command."),
-
- /**
- * Determine whether SAVEPOINTS used in AUTOSAVE will be released per query or not
- */
- CLEANUP_SAVEPOINTS(
- "cleanupSavepoints",
- "false",
- "Determine whether SAVEPOINTS used in AUTOSAVE will be released per query or not",
- false,
- new String[]{"true", "false"}),
-
- /**
- * <p>The timeout value used for socket connect operations. If connecting to the server takes longer
- * than this value, the connection is broken.
- *
- * <p>The timeout is specified in seconds and a value of zero means that it is disabled.
- */
- CONNECT_TIMEOUT(
- "connectTimeout",
- "10",
- "The timeout value in seconds used for socket connect operations."),
-
- /**
- * Specify the schema (or several schema separated by commas) to be set in the search-path. This schema will be used to resolve
- * unqualified object names used in statements over this connection.
- */
- CURRENT_SCHEMA(
- "currentSchema",
- null,
- "Specify the schema (or several schema separated by commas) to be set in the search-path"),
-
- /**
- * Specifies the maximum number of fields to be cached per connection. A value of {@code 0} disables the cache.
- */
- DATABASE_METADATA_CACHE_FIELDS(
- "databaseMetadataCacheFields",
- "65536",
- "Specifies the maximum number of fields to be cached per connection. A value of {@code 0} disables the cache."),
-
- /**
- * Specifies the maximum size (in megabytes) of fields to be cached per connection. A value of {@code 0} disables the cache.
- */
- DATABASE_METADATA_CACHE_FIELDS_MIB(
- "databaseMetadataCacheFieldsMiB",
- "5",
- "Specifies the maximum size (in megabytes) of fields to be cached per connection. A value of {@code 0} disables the cache."),
-
- /**
- * Default parameter for {@link java.sql.Statement#getFetchSize()}. A value of {@code 0} means
- * that need fetch all rows at once
- */
- DEFAULT_ROW_FETCH_SIZE(
- "defaultRowFetchSize",
- "0",
- "Positive number of rows that should be fetched from the database when more rows are needed for ResultSet by each fetch iteration"),
-
- /**
- * Enable optimization that disables column name sanitiser.
- */
- DISABLE_COLUMN_SANITISER(
- "disableColumnSanitiser",
- "false",
- "Enable optimization that disables column name sanitiser"),
-
- /**
- * Specifies how the driver transforms JDBC escape call syntax into underlying SQL, for invoking procedures or functions. (backend >= 11)
- * In {@code escapeSyntaxCallMode=select} mode (the default), the driver always uses a SELECT statement (allowing function invocation only).
- * In {@code escapeSyntaxCallMode=callIfNoReturn} mode, the driver uses a CALL statement (allowing procedure invocation) if there is no return parameter specified, otherwise the driver uses a SELECT statement.
- * In {@code escapeSyntaxCallMode=call} mode, the driver always uses a CALL statement (allowing procedure invocation only).
- */
- ESCAPE_SYNTAX_CALL_MODE(
- "escapeSyntaxCallMode",
- "select",
- "Specifies how the driver transforms JDBC escape call syntax into underlying SQL, for invoking procedures or functions. (backend >= 11)"
- + "In escapeSyntaxCallMode=select mode (the default), the driver always uses a SELECT statement (allowing function invocation only)."
- + "In escapeSyntaxCallMode=callIfNoReturn mode, the driver uses a CALL statement (allowing procedure invocation) if there is no return parameter specified, otherwise the driver uses a SELECT statement."
- + "In escapeSyntaxCallMode=call mode, the driver always uses a CALL statement (allowing procedure invocation only).",
- false,
- new String[]{"select", "callIfNoReturn", "call"}),
-
- /**
- * Group startup parameters in a transaction
- * This is important in pool-by-transaction scenarios in order to make sure that all the statements
- * reaches the same connection that is being initialized. All of the startup parameters will be wrapped
- * in a transaction
- * Note this is off by default as pgbouncer in statement mode
- */
- GROUP_STARTUP_PARAMETERS(
- "groupStartupParameters",
- "false",
- "This is important in pool-by-transaction scenarios in order to make sure that all "
- + "the statements reaches the same connection that is being initialized."
- ),
-
- GSS_ENC_MODE(
- "gssEncMode",
- "allow",
- "Force Encoded GSS Mode",
- false,
- new String[]{"disable", "allow", "prefer", "require"}
- ),
-
- /**
- * Force one of
- * <ul>
- * <li>SSPI (Windows transparent single-sign-on)</li>
- * <li>GSSAPI (Kerberos, via JSSE)</li>
- * </ul>
- * to be used when the server requests Kerberos or SSPI authentication.
- */
- GSS_LIB(
- "gsslib",
- "auto",
- "Force SSSPI or GSSAPI",
- false,
- new String[]{"auto", "sspi", "gssapi"}),
-
- /**
- * <p>After requesting an upgrade to SSL from the server there are reports of the server not responding due to a failover
- * without a timeout here, the client can wait forever. The pattern for requesting a GSS encrypted connection is the same so we provide the same
- * timeout mechanism This timeout will be set before the request and reset after
- */
- GSS_RESPONSE_TIMEOUT(
- "gssResponseTimeout",
- "5000",
- "Time in milliseconds we wait for a response from the server after requesting a GSS upgrade"),
-
-
- /**
- * Enable mode to filter out the names of database objects for which the current user has no privileges
- * granted from appearing in the DatabaseMetaData returned by the driver.
- */
- HIDE_UNPRIVILEGED_OBJECTS(
- "hideUnprivilegedObjects",
- "false",
- "Enable hiding of database objects for which the current user has no privileges granted from the DatabaseMetaData"),
-
- HOST_RECHECK_SECONDS(
- "hostRecheckSeconds",
- "10",
- "Specifies period (seconds) after which the host status is checked again in case it has changed"),
-
- /**
- * Specifies the name of the JAAS system or application login configuration.
- */
- JAAS_APPLICATION_NAME(
- "jaasApplicationName",
- "pgjdbc",
- "Specifies the name of the JAAS system or application login configuration."),
-
- /**
- * Flag to enable/disable obtaining a GSS credential via JAAS login before authenticating.
- * Useful if setting system property javax.security.auth.useSubjectCredsOnly=false
- * or using native GSS with system property sun.security.jgss.native=true
- */
- JAAS_LOGIN(
- "jaasLogin",
- "true",
- "Login with JAAS before doing GSSAPI authentication"),
-
- /**
- * The Kerberos service name to use when authenticating with GSSAPI. This is equivalent to libpq's
- * PGKRBSRVNAME environment variable.
- */
- KERBEROS_SERVER_NAME(
- "kerberosServerName",
- null,
- "The Kerberos service name to use when authenticating with GSSAPI."),
-
- LOAD_BALANCE_HOSTS(
- "loadBalanceHosts",
- "false",
- "If disabled hosts are connected in the given order. If enabled hosts are chosen randomly from the set of suitable candidates"),
-
- /**
- * <p>If this is set then the client side will bind to this address. This is useful if you need
- * to choose which interface to connect to.
- */
- LOCAL_SOCKET_ADDRESS(
- "localSocketAddress",
- null,
- "Local Socket address, if set bind the client side of the socket to this address"),
-
- /**
- * This property is no longer used by the driver and will be ignored.
- * @deprecated Logging is configured via java.util.logging.
- */
- @Deprecated
- LOGGER_FILE(
- "loggerFile",
- null,
- "File name output of the Logger"),
-
- /**
- * This property is no longer used by the driver and will be ignored.
- * @deprecated Logging is configured via java.util.logging.
- */
- @Deprecated
- LOGGER_LEVEL(
- "loggerLevel",
- null,
- "Logger level of the driver",
- false,
- new String[]{"OFF", "DEBUG", "TRACE"}),
-
- /**
- * Specify how long to wait for establishment of a database connection. The timeout is specified
- * in seconds.
- */
- LOGIN_TIMEOUT(
- "loginTimeout",
- "0",
- "Specify how long in seconds to wait for establishment of a database connection."),
-
- /**
- * Whether to include full server error detail in exception messages.
- */
- LOG_SERVER_ERROR_DETAIL(
- "logServerErrorDetail",
- "true",
- "Include full server error detail in exception messages. If disabled then only the error itself will be included."),
-
- /**
- * When connections that are not explicitly closed are garbage collected, log the stacktrace from
- * the opening of the connection to trace the leak source.
- */
- LOG_UNCLOSED_CONNECTIONS(
- "logUnclosedConnections",
- "false",
- "When connections that are not explicitly closed are garbage collected, log the stacktrace from the opening of the connection to trace the leak source"),
-
- /**
- * Specifies size of buffer during fetching result set. Can be specified as specified size or
- * percent of heap memory.
- */
- MAX_RESULT_BUFFER(
- "maxResultBuffer",
- null,
- "Specifies size of buffer during fetching result set. Can be specified as specified size or percent of heap memory."),
-
- /**
- * Specify 'options' connection initialization parameter.
- * The value of this parameter may contain spaces and other special characters or their URL representation.
- */
- OPTIONS(
- "options",
- null,
- "Specify 'options' connection initialization parameter."),
-
- /**
- * Password to use when authenticating.
- */
- PASSWORD(
- "password",
- null,
- "Password to use when authenticating.",
- false),
-
- /**
- * Database name to connect to (may be specified directly in the JDBC URL).
- */
- PG_DBNAME(
- "PGDBNAME",
- null,
- "Database name to connect to (may be specified directly in the JDBC URL)",
- true),
-
- /**
- * Hostname of the PostgreSQL server (may be specified directly in the JDBC URL).
- */
- PG_HOST(
- "PGHOST",
- "localhost",
- "Hostname of the PostgreSQL server (may be specified directly in the JDBC URL)",
- false),
-
- /**
- * Port of the PostgreSQL server (may be specified directly in the JDBC URL).
- */
- PG_PORT(
- "PGPORT",
- "5432",
- "Port of the PostgreSQL server (may be specified directly in the JDBC URL)"),
-
- /**
- * <p>Specifies which mode is used to execute queries to database: simple means ('Q' execute, no parse, no bind, text mode only),
- * extended means always use bind/execute messages, extendedForPrepared means extended for prepared statements only,
- * extendedCacheEverything means use extended protocol and try cache every statement (including Statement.execute(String sql)) in a query cache.
- *
- * <p>This mode is meant for debugging purposes and/or for cases when extended protocol cannot be used (e.g. logical replication protocol)
- */
- PREFER_QUERY_MODE(
- "preferQueryMode",
- "extended",
- "Specifies which mode is used to execute queries to database: simple means ('Q' execute, no parse, no bind, text mode only), "
- + "extended means always use bind/execute messages, extendedForPrepared means extended for prepared statements only, "
- + "extendedCacheEverything means use extended protocol and try cache every statement (including Statement.execute(String sql)) in a query cache.", false,
- new String[]{"extended", "extendedForPrepared", "extendedCacheEverything", "simple"}),
-
- /**
- * Specifies the maximum number of entries in cache of prepared statements. A value of {@code 0}
- * disables the cache.
- */
- PREPARED_STATEMENT_CACHE_QUERIES(
- "preparedStatementCacheQueries",
- "256",
- "Specifies the maximum number of entries in per-connection cache of prepared statements. A value of {@code 0} disables the cache."),
-
- /**
- * Specifies the maximum size (in megabytes) of the prepared statement cache. A value of {@code 0}
- * disables the cache.
- */
- PREPARED_STATEMENT_CACHE_SIZE_MIB(
- "preparedStatementCacheSizeMiB",
- "5",
- "Specifies the maximum size (in megabytes) of a per-connection prepared statement cache. A value of {@code 0} disables the cache."),
-
- /**
- * Sets the default threshold for enabling server-side prepare. A value of {@code -1} stands for
- * forceBinary
- */
- PREPARE_THRESHOLD(
- "prepareThreshold",
- "5",
- "Statement prepare threshold. A value of {@code -1} stands for forceBinary"),
-
- /**
- * Force use of a particular protocol version when connecting, if set, disables protocol version
- * fallback.
- */
- PROTOCOL_VERSION(
- "protocolVersion",
- null,
- "Force use of a particular protocol version when connecting, currently only version 3 is supported.",
- false,
- new String[]{"3"}),
-
- /**
- * Quote returning columns.
- * There are some ORM's that quote everything, including returning columns
- * If we quote them, then we end up sending ""colname"" to the backend
- * which will not be found
- */
- QUOTE_RETURNING_IDENTIFIERS(
- "quoteReturningIdentifiers",
- "true",
- "Quote identifiers provided in returning array",
- false),
- /**
- * Puts this connection in read-only mode.
- */
- READ_ONLY(
- "readOnly",
- "false",
- "Puts this connection in read-only mode"),
-
- /**
- * Connection parameter to control behavior when
- * {@link Connection#setReadOnly(boolean)} is set to {@code true}.
- */
- READ_ONLY_MODE(
- "readOnlyMode",
- "transaction",
- "Controls the behavior when a connection is set to be read only, one of 'ignore', 'transaction', or 'always' "
- + "When 'ignore', setting readOnly has no effect. "
- + "When 'transaction' setting readOnly to 'true' will cause transactions to BEGIN READ ONLY if autocommit is 'false'. "
- + "When 'always' setting readOnly to 'true' will set the session to READ ONLY if autoCommit is 'true' "
- + "and the transaction to BEGIN READ ONLY if autocommit is 'false'.",
- false,
- new String[]{"ignore", "transaction", "always"}),
-
- /**
- * Socket read buffer size (SO_RECVBUF). A value of {@code -1}, which is the default, means system
- * default.
- */
- RECEIVE_BUFFER_SIZE(
- "receiveBufferSize",
- "-1",
- "Socket read buffer size"),
-
- /**
- * <p>Connection parameter passed in the startup message. This parameter accepts two values; "true"
- * and "database". Passing "true" tells the backend to go into walsender mode, wherein a small set
- * of replication commands can be issued instead of SQL statements. Only the simple query protocol
- * can be used in walsender mode. Passing "database" as the value instructs walsender to connect
- * to the database specified in the dbname parameter, which will allow the connection to be used
- * for logical replication from that database.
- * <p>Parameter should be use together with {@link PGProperty#ASSUME_MIN_SERVER_VERSION} with
- * parameter >= 9.4 (backend >= 9.4)
- */
- REPLICATION(
- "replication",
- null,
- "Connection parameter passed in startup message, one of 'true' or 'database' "
- + "Passing 'true' tells the backend to go into walsender mode, "
- + "wherein a small set of replication commands can be issued instead of SQL statements. "
- + "Only the simple query protocol can be used in walsender mode. "
- + "Passing 'database' as the value instructs walsender to connect "
- + "to the database specified in the dbname parameter, "
- + "which will allow the connection to be used for logical replication "
- + "from that database. "
- + "(backend >= 9.4)"),
-
- /**
- * Configure optimization to enable batch insert re-writing.
- */
- REWRITE_BATCHED_INSERTS(
- "reWriteBatchedInserts",
- "false",
- "Enable optimization to rewrite and collapse compatible INSERT statements that are batched."),
-
- /**
- * Socket write buffer size (SO_SNDBUF). A value of {@code -1}, which is the default, means system
- * default.
- */
- SEND_BUFFER_SIZE(
- "sendBufferSize",
- "-1",
- "Socket write buffer size"),
-
- /**
- * Service name to use for additional parameters. It specifies a service name in "pg_service
- * .conf" that holds additional connection parameters. This allows applications to specify only
- * a service name so connection parameters can be centrally maintained.
- */
- SERVICE(
- "service",
- null,
- "Service name to be searched in pg_service.conf resource"),
-
- /**
- * Socket factory used to create socket. A null value, which is the default, means system default.
- */
- SOCKET_FACTORY(
- "socketFactory",
- null,
- "Specify a socket factory for socket creation"),
-
- /**
- * The String argument to give to the constructor of the Socket Factory.
- */
- SOCKET_FACTORY_ARG(
- "socketFactoryArg",
- null,
- "Argument forwarded to constructor of SocketFactory class."),
-
- /**
- * The timeout value used for socket read operations. If reading from the server takes longer than
- * this value, the connection is closed. This can be used as both a brute force global query
- * timeout and a method of detecting network problems. The timeout is specified in seconds and a
- * value of zero means that it is disabled.
- */
- SOCKET_TIMEOUT(
- "socketTimeout",
- "0",
- "The timeout value in seconds max(2147484) used for socket read operations."),
-
- /**
- * Control use of SSL: empty or {@code true} values imply {@code sslmode==verify-full}
- */
- SSL(
- "ssl",
- null,
- "Control use of SSL (any non-null value causes SSL to be required)"),
-
- /**
- * File containing the SSL Certificate. Default will be the file {@code postgresql.crt} in {@code
- * $HOME/.postgresql} (*nix) or {@code %APPDATA%\postgresql} (windows).
- */
- SSL_CERT(
- "sslcert",
- null,
- "The location of the client's SSL certificate"),
-
- /**
- * Classname of the SSL Factory to use (instance of {@link javax.net.ssl.SSLSocketFactory}).
- */
- SSL_FACTORY(
- "sslfactory",
- "org.postgresql.ssl.LibPQFactory",
- "Provide a SSLSocketFactory class when using SSL."),
-
- /**
- * The String argument to give to the constructor of the SSL Factory.
- * @deprecated use {@code ..Factory(Properties)} constructor.
- */
- @Deprecated
- SSL_FACTORY_ARG(
- "sslfactoryarg",
- null,
- "Argument forwarded to constructor of SSLSocketFactory class."),
-
- /**
- * Classname of the SSL HostnameVerifier to use (instance of {@link javax.net.ssl.HostnameVerifier}).
- */
- SSL_HOSTNAME_VERIFIER(
- "sslhostnameverifier",
- null,
- "A class, implementing javax.net.ssl.HostnameVerifier that can verify the server"),
-
- /**
- * File containing the SSL Key. Default will be the file {@code postgresql.pk8} in {@code $HOME/.postgresql} (*nix)
- * or {@code %APPDATA%\postgresql} (windows).
- */
- SSL_KEY(
- "sslkey",
- null,
- "The location of the client's PKCS#8 SSL key"),
-
- /**
- * Parameter governing the use of SSL. The allowed values are {@code disable}, {@code allow},
- * {@code prefer}, {@code require}, {@code verify-ca}, {@code verify-full}.
- * If {@code ssl} property is empty or set to {@code true} it implies {@code verify-full}.
- * Default mode is "require"
- */
- SSL_MODE(
- "sslmode",
- null,
- "Parameter governing the use of SSL",
- false,
- new String[]{"disable", "allow", "prefer", "require", "verify-ca", "verify-full"}),
-
- /**
- * The SSL password to use in the default CallbackHandler.
- */
- SSL_PASSWORD(
- "sslpassword",
- null,
- "The password for the client's ssl key (ignored if sslpasswordcallback is set)"),
-
-
- /**
- * The classname instantiating {@link javax.security.auth.callback.CallbackHandler} to use.
- */
- SSL_PASSWORD_CALLBACK(
- "sslpasswordcallback",
- null,
- "A class, implementing javax.security.auth.callback.CallbackHandler that can handle PasswordCallback for the ssl password."),
-
- /**
- * <p>After requesting an upgrade to SSL from the server there are reports of the server not responding due to a failover
- * without a timeout here, the client can wait forever. This timeout will be set before the request and reset after
- */
- SSL_RESPONSE_TIMEOUT(
- "sslResponseTimeout",
- "5000",
- "Time in milliseconds we wait for a response from the server after requesting SSL upgrade"),
-
- /**
- * File containing the root certificate when validating server ({@code sslmode} = {@code
- * verify-ca} or {@code verify-full}). Default will be the file {@code root.crt} in {@code
- * $HOME/.postgresql} (*nix) or {@code %APPDATA%\postgresql} (windows).
- */
- SSL_ROOT_CERT(
- "sslrootcert",
- null,
- "The location of the root certificate for authenticating the server."),
-
- /**
- * Specifies the name of the SSPI service class that forms the service class part of the SPN. The
- * default, {@code POSTGRES}, is almost always correct.
- */
- SSPI_SERVICE_CLASS(
- "sspiServiceClass",
- "POSTGRES",
- "The Windows SSPI service class for SPN"),
-
- /**
- * Bind String to either {@code unspecified} or {@code varchar}. Default is {@code varchar} for
- * 8.0+ backends.
- */
- STRING_TYPE(
- "stringtype",
- null,
- "The type to bind String parameters as (usually 'varchar', 'unspecified' allows implicit casting to other types)",
- false,
- new String[]{"unspecified", "varchar"}),
-
- TARGET_SERVER_TYPE(
- "targetServerType",
- "any",
- "Specifies what kind of server to connect",
- false,
- new String []{"any", "primary", "master", "slave", "secondary", "preferSlave", "preferSecondary", "preferPrimary"}),
-
- /**
- * Enable or disable TCP keep-alive. The default is {@code false}.
- */
- TCP_KEEP_ALIVE(
- "tcpKeepAlive",
- "false",
- "Enable or disable TCP keep-alive. The default is {@code false}."),
-
- TCP_NO_DELAY(
- "tcpNoDelay",
- "true",
- "Enable or disable TCP no delay. The default is (@code true}."
- ),
- /**
- * Specifies the length to return for types of unknown length.
- */
- UNKNOWN_LENGTH(
- "unknownLength",
- Integer.toString(Integer.MAX_VALUE),
- "Specifies the length to return for types of unknown length"),
-
- /**
- * Username to connect to the database as.
- */
- USER(
- "user",
- null,
- "Username to connect to the database as.",
- true),
-
- /**
- * Use SPNEGO in SSPI authentication requests.
- */
- USE_SPNEGO(
- "useSpnego",
- "false",
- "Use SPNEGO in SSPI authentication requests"),
-
- /**
- * Factory class to instantiate factories for XML processing.
- * The default factory disables external entity processing.
- * Legacy behavior with external entity processing can be enabled by specifying a value of LEGACY_INSECURE.
- * Or specify a custom class that implements {@link org.postgresql.xml.PGXmlFactoryFactory}.
- */
- XML_FACTORY_FACTORY(
- "xmlFactoryFactory",
- "",
- "Factory class to instantiate factories for XML processing"),
-
- ;
-
- private final String name;
- private final String defaultValue;
- private final boolean required;
- private final String description;
- private final String [] choices;
-
- PGProperty(String name, String defaultValue, String description) {
- this(name, defaultValue, description, false);
- }
-
- PGProperty(String name, String defaultValue, String description, boolean required) {
- this(name, defaultValue, description, required, (String[]) null);
- }
-
- PGProperty(String name, String defaultValue, String description, boolean required,
- String [] choices) {
- this.name = name;
- this.defaultValue = defaultValue;
- this.required = required;
- this.description = description;
- this.choices = choices;
- }
-
- private static final Map PROPS_BY_NAME = new HashMap<>();
-
- static {
- for (PGProperty prop : PGProperty.values()) {
- if (PROPS_BY_NAME.put(prop.getName(), prop) != null) {
- throw new IllegalStateException("Duplicate PGProperty name: " + prop.getName());
- }
+ /**
+ * Specifies if number of rows, used during fetching rows of a result set, should be computed
+ * dynamically. Number of rows will be calculated by dividing maxResultBuffer size by max row size
+ * observed so far, rounded down. First fetch will have number of rows declared in
+ * defaultRowFetchSize. Number of rows can be limited by adaptiveFetchMinimum and
+ * adaptiveFetchMaximum. Requires declaring of maxResultBuffer and defaultRowFetchSize to work.
+ * Default value is false.
+ */
+ ADAPTIVE_FETCH(
+ "adaptiveFetch",
+ "false",
+ "Specifies if number of rows fetched in ResultSet should be adaptive to maxResultBuffer and max row size."),
+
+ /**
+ * Specifies the highest number of rows which can be calculated by adaptiveFetch. Requires
+ * adaptiveFetch set to true to work. Default value is -1 (used as infinity).
+ */
+ ADAPTIVE_FETCH_MAXIMUM(
+ "adaptiveFetchMaximum",
+ "-1",
+ "Specifies maximum number of rows used by adaptive fetch."),
+
+ /**
+ * Specifies the lowest number of rows which can be calculated by adaptiveFetch. Requires
+ * adaptiveFetch set to true to work. Default value is 0.
+ */
+ ADAPTIVE_FETCH_MINIMUM(
+ "adaptiveFetchMinimum",
+ "0",
+ "Specifies minimum number of rows used by adaptive fetch."),
+
+ /**
+ * When using the V3 protocol the driver monitors changes in certain server configuration
+ * parameters that should not be touched by end users. The {@code client_encoding} setting is set
+ * by the driver and should not be altered. If the driver detects a change it will abort the
+ * connection.
+ */
+ ALLOW_ENCODING_CHANGES(
+ "allowEncodingChanges",
+ "false",
+ "Allow for changes in client_encoding"),
+
+ /**
+ * The application name (require server version >= 9.0).
+ */
+ APPLICATION_NAME(
+ "ApplicationName",
+ DriverInfo.DRIVER_NAME,
+ "Name of the Application (backend >= 9.0)"),
+
+ /**
+ * Assume the server is at least that version.
+ */
+ ASSUME_MIN_SERVER_VERSION(
+ "assumeMinServerVersion",
+ null,
+ "Assume the server is at least that version"),
+
+ /**
+ * AuthenticationPluginClass
+ */
+
+ AUTHENTICATION_PLUGIN_CLASS_NAME(
+ "authenticationPluginClassName",
+ null,
+ "Name of class which implements AuthenticationPlugin"
+ ),
+
+ /**
+ * Specifies what the driver should do if a query fails. In {@code autosave=always} mode, JDBC driver sets a savepoint before each query,
+ * and rolls back to that savepoint in case of failure. In {@code autosave=never} mode (default), no savepoint dance is made ever.
+ * In {@code autosave=conservative} mode, savepoint is set for each query, however the rollback is done only for rare cases
+ * like 'cached statement cannot change return type' or 'statement XXX is not valid' so JDBC driver rollsback and retries
+ */
+ AUTOSAVE(
+ "autosave",
+ "never",
+ "Specifies what the driver should do if a query fails. In autosave=always mode, JDBC driver sets a savepoint before each query, "
+ + "and rolls back to that savepoint in case of failure. In autosave=never mode (default), no savepoint dance is made ever. "
+ + "In autosave=conservative mode, safepoint is set for each query, however the rollback is done only for rare cases"
+ + " like 'cached statement cannot change return type' or 'statement XXX is not valid' so JDBC driver rollsback and retries",
+ false,
+ new String[]{"always", "never", "conservative"}),
+
+ /**
+ * Use binary format for sending and receiving data if possible.
+ */
+ BINARY_TRANSFER(
+ "binaryTransfer",
+ "true",
+ "Use binary format for sending and receiving data if possible"),
+
+ /**
+ * Comma separated list of types to disable binary transfer. Either OID numbers or names.
+ * Overrides values in the driver default set and values set with binaryTransferEnable.
+ */
+ BINARY_TRANSFER_DISABLE(
+ "binaryTransferDisable",
+ "",
+ "Comma separated list of types to disable binary transfer. Either OID numbers or names. Overrides values in the driver default set and values set with binaryTransferEnable."),
+
+ /**
+ * Comma separated list of types to enable binary transfer. Either OID numbers or names
+ */
+ BINARY_TRANSFER_ENABLE(
+ "binaryTransferEnable",
+ "",
+ "Comma separated list of types to enable binary transfer. Either OID numbers or names"),
+
+ /**
+ * Cancel command is sent out of band over its own connection, so cancel message can itself get
+ * stuck.
+ * This property controls "connect timeout" and "socket timeout" used for cancel commands.
+ * The timeout is specified in seconds. Default value is 10 seconds.
+ */
+ CANCEL_SIGNAL_TIMEOUT(
+ "cancelSignalTimeout",
+ "10",
+ "The timeout that is used for sending cancel command."),
+
+ /**
+ * Determine whether SAVEPOINTS used in AUTOSAVE will be released per query or not
+ */
+ CLEANUP_SAVEPOINTS(
+ "cleanupSavepoints",
+ "false",
+ "Determine whether SAVEPOINTS used in AUTOSAVE will be released per query or not",
+ false,
+ new String[]{"true", "false"}),
+
+ /**
+ * <p>
+ * The timeout value used for socket connect operations. If connecting to the server takes longer
+ * than this value, the connection is broken.
+ *
+ * <p>
+ * The timeout is specified in seconds and a value of zero means that it is disabled.
+ */
+ CONNECT_TIMEOUT(
+ "connectTimeout",
+ "10",
+ "The timeout value in seconds used for socket connect operations."),
+
+ /**
+ * Specify the schema (or several schema separated by commas) to be set in the search-path. This schema will be used to resolve
+ * unqualified object names used in statements over this connection.
+ */
+ CURRENT_SCHEMA(
+ "currentSchema",
+ null,
+ "Specify the schema (or several schema separated by commas) to be set in the search-path"),
+
+ /**
+ * Specifies the maximum number of fields to be cached per connection. A value of {@code 0} disables the cache.
+ */
+ DATABASE_METADATA_CACHE_FIELDS(
+ "databaseMetadataCacheFields",
+ "65536",
+ "Specifies the maximum number of fields to be cached per connection. A value of {@code 0} disables the cache."),
+
+ /**
+ * Specifies the maximum size (in megabytes) of fields to be cached per connection. A value of {@code 0} disables the cache.
+ */
+ DATABASE_METADATA_CACHE_FIELDS_MIB(
+ "databaseMetadataCacheFieldsMiB",
+ "5",
+ "Specifies the maximum size (in megabytes) of fields to be cached per connection. A value of {@code 0} disables the cache."),
+
+ /**
+ * Default parameter for {@link java.sql.Statement#getFetchSize()}. A value of {@code 0} means
+ * that need fetch all rows at once
+ */
+ DEFAULT_ROW_FETCH_SIZE(
+ "defaultRowFetchSize",
+ "0",
+ "Positive number of rows that should be fetched from the database when more rows are needed for ResultSet by each fetch iteration"),
+
+ /**
+ * Enable optimization that disables column name sanitiser.
+ */
+ DISABLE_COLUMN_SANITISER(
+ "disableColumnSanitiser",
+ "false",
+ "Enable optimization that disables column name sanitiser"),
+
+ /**
+ * Specifies how the driver transforms JDBC escape call syntax into underlying SQL, for invoking procedures or functions. (backend >= 11)
+ * In {@code escapeSyntaxCallMode=select} mode (the default), the driver always uses a SELECT statement (allowing function invocation only).
+ * In {@code escapeSyntaxCallMode=callIfNoReturn} mode, the driver uses a CALL statement (allowing procedure invocation) if there is no return parameter specified, otherwise the driver uses a SELECT statement.
+ * In {@code escapeSyntaxCallMode=call} mode, the driver always uses a CALL statement (allowing procedure invocation only).
+ */
+ ESCAPE_SYNTAX_CALL_MODE(
+ "escapeSyntaxCallMode",
+ "select",
+ "Specifies how the driver transforms JDBC escape call syntax into underlying SQL, for invoking procedures or functions. (backend >= 11)"
+ + "In escapeSyntaxCallMode=select mode (the default), the driver always uses a SELECT statement (allowing function invocation only)."
+ + "In escapeSyntaxCallMode=callIfNoReturn mode, the driver uses a CALL statement (allowing procedure invocation) if there is no return parameter specified, otherwise the driver uses a SELECT statement."
+ + "In escapeSyntaxCallMode=call mode, the driver always uses a CALL statement (allowing procedure invocation only).",
+ false,
+ new String[]{"select", "callIfNoReturn", "call"}),
+
+ /**
+ * Group startup parameters in a transaction
+ * This is important in pool-by-transaction scenarios in order to make sure that all the statements
+ * reaches the same connection that is being initialized. All of the startup parameters will be wrapped
+ * in a transaction
+ * Note this is off by default as pgbouncer in statement mode
+ */
+ GROUP_STARTUP_PARAMETERS(
+ "groupStartupParameters",
+ "false",
+ "This is important in pool-by-transaction scenarios in order to make sure that all "
+ + "the statements reaches the same connection that is being initialized."
+ ),
+
+ GSS_ENC_MODE(
+ "gssEncMode",
+ "allow",
+ "Force Encoded GSS Mode",
+ false,
+ new String[]{"disable", "allow", "prefer", "require"}
+ ),
+
+ /**
+ * Force one of
+ *
+ * <ul>
+ * <li>SSPI (Windows transparent single-sign-on)</li>
+ * <li>GSSAPI (Kerberos, via JSSE)</li>
+ * </ul>
+ *
+ * to be used when the server requests Kerberos or SSPI authentication.
+ */
+ GSS_LIB(
+ "gsslib",
+ "auto",
+ "Force SSSPI or GSSAPI",
+ false,
+ new String[]{"auto", "sspi", "gssapi"}),
+
+ /**
+ * <p>
+ * After requesting an upgrade to SSL from the server there are reports of the server not responding due to a failover
+ * without a timeout here, the client can wait forever. The pattern for requesting a GSS encrypted connection is the same so we provide the same
+ * timeout mechanism This timeout will be set before the request and reset after
+ */
+ GSS_RESPONSE_TIMEOUT(
+ "gssResponseTimeout",
+ "5000",
+ "Time in milliseconds we wait for a response from the server after requesting a GSS upgrade"),
+
+
+ /**
+ * Enable mode to filter out the names of database objects for which the current user has no privileges
+ * granted from appearing in the DatabaseMetaData returned by the driver.
+ */
+ HIDE_UNPRIVILEGED_OBJECTS(
+ "hideUnprivilegedObjects",
+ "false",
+ "Enable hiding of database objects for which the current user has no privileges granted from the DatabaseMetaData"),
+
+ HOST_RECHECK_SECONDS(
+ "hostRecheckSeconds",
+ "10",
+ "Specifies period (seconds) after which the host status is checked again in case it has changed"),
+
+ /**
+ * Specifies the name of the JAAS system or application login configuration.
+ */
+ JAAS_APPLICATION_NAME(
+ "jaasApplicationName",
+ "pgjdbc",
+ "Specifies the name of the JAAS system or application login configuration."),
+
+ /**
+ * Flag to enable/disable obtaining a GSS credential via JAAS login before authenticating.
+ * Useful if setting system property javax.security.auth.useSubjectCredsOnly=false
+ * or using native GSS with system property sun.security.jgss.native=true
+ */
+ JAAS_LOGIN(
+ "jaasLogin",
+ "true",
+ "Login with JAAS before doing GSSAPI authentication"),
+
+ /**
+ * The Kerberos service name to use when authenticating with GSSAPI. This is equivalent to libpq's
+ * PGKRBSRVNAME environment variable.
+ */
+ KERBEROS_SERVER_NAME(
+ "kerberosServerName",
+ null,
+ "The Kerberos service name to use when authenticating with GSSAPI."),
+
+ LOAD_BALANCE_HOSTS(
+ "loadBalanceHosts",
+ "false",
+ "If disabled hosts are connected in the given order. If enabled hosts are chosen randomly from the set of suitable candidates"),
+
+ /**
+ * <p>
+ * If this is set then the client side will bind to this address. This is useful if you need
+ * to choose which interface to connect to.
+ */
+ LOCAL_SOCKET_ADDRESS(
+ "localSocketAddress",
+ null,
+ "Local Socket address, if set bind the client side of the socket to this address"),
+
+ /**
+ * This property is no longer used by the driver and will be ignored.
+ *
+ * @deprecated Logging is configured via java.util.logging.
+ */
+ @Deprecated
+ LOGGER_FILE(
+ "loggerFile",
+ null,
+ "File name output of the Logger"),
+
+ /**
+ * This property is no longer used by the driver and will be ignored.
+ *
+ * @deprecated Logging is configured via java.util.logging.
+ */
+ @Deprecated
+ LOGGER_LEVEL(
+ "loggerLevel",
+ null,
+ "Logger level of the driver",
+ false,
+ new String[]{"OFF", "DEBUG", "TRACE"}),
+
+ /**
+ * Specify how long to wait for establishment of a database connection. The timeout is specified
+ * in seconds.
+ */
+ LOGIN_TIMEOUT(
+ "loginTimeout",
+ "0",
+ "Specify how long in seconds to wait for establishment of a database connection."),
+
+ /**
+ * Whether to include full server error detail in exception messages.
+ */
+ LOG_SERVER_ERROR_DETAIL(
+ "logServerErrorDetail",
+ "true",
+ "Include full server error detail in exception messages. If disabled then only the error itself will be included."),
+
+ /**
+ * When connections that are not explicitly closed are garbage collected, log the stacktrace from
+ * the opening of the connection to trace the leak source.
+ */
+ LOG_UNCLOSED_CONNECTIONS(
+ "logUnclosedConnections",
+ "false",
+ "When connections that are not explicitly closed are garbage collected, log the stacktrace from the opening of the connection to trace the leak source"),
+
+ /**
+ * Specifies size of buffer during fetching result set. Can be specified as specified size or
+ * percent of heap memory.
+ */
+ MAX_RESULT_BUFFER(
+ "maxResultBuffer",
+ null,
+ "Specifies size of buffer during fetching result set. Can be specified as specified size or percent of heap memory."),
+
+ /**
+ * Specify 'options' connection initialization parameter.
+ * The value of this parameter may contain spaces and other special characters or their URL representation.
+ */
+ OPTIONS(
+ "options",
+ null,
+ "Specify 'options' connection initialization parameter."),
+
+ /**
+ * Password to use when authenticating.
+ */
+ PASSWORD(
+ "password",
+ null,
+ "Password to use when authenticating.",
+ false),
+
+ /**
+ * Database name to connect to (may be specified directly in the JDBC URL).
+ */
+ PG_DBNAME(
+ "PGDBNAME",
+ null,
+ "Database name to connect to (may be specified directly in the JDBC URL)",
+ true),
+
+ /**
+ * Hostname of the PostgreSQL server (may be specified directly in the JDBC URL).
+ */
+ PG_HOST(
+ "PGHOST",
+ "localhost",
+ "Hostname of the PostgreSQL server (may be specified directly in the JDBC URL)",
+ false),
+
+ /**
+ * Port of the PostgreSQL server (may be specified directly in the JDBC URL).
+ */
+ PG_PORT(
+ "PGPORT",
+ "5432",
+ "Port of the PostgreSQL server (may be specified directly in the JDBC URL)"),
+
+ /**
+ * <p>
+ * Specifies which mode is used to execute queries to database: simple means ('Q' execute, no parse, no bind, text mode only),
+ * extended means always use bind/execute messages, extendedForPrepared means extended for prepared statements only,
+ * extendedCacheEverything means use extended protocol and try cache every statement (including Statement.execute(String sql)) in a query cache.
+ *
+ * <p>
+ * This mode is meant for debugging purposes and/or for cases when extended protocol cannot be used (e.g. logical replication protocol)
+ */
+ PREFER_QUERY_MODE(
+ "preferQueryMode",
+ "extended",
+ "Specifies which mode is used to execute queries to database: simple means ('Q' execute, no parse, no bind, text mode only), "
+ + "extended means always use bind/execute messages, extendedForPrepared means extended for prepared statements only, "
+ + "extendedCacheEverything means use extended protocol and try cache every statement (including Statement.execute(String sql)) in a query cache.", false,
+ new String[]{"extended", "extendedForPrepared", "extendedCacheEverything", "simple"}),
+
+ /**
+ * Specifies the maximum number of entries in cache of prepared statements. A value of {@code 0}
+ * disables the cache.
+ */
+ PREPARED_STATEMENT_CACHE_QUERIES(
+ "preparedStatementCacheQueries",
+ "256",
+ "Specifies the maximum number of entries in per-connection cache of prepared statements. A value of {@code 0} disables the cache."),
+
+ /**
+ * Specifies the maximum size (in megabytes) of the prepared statement cache. A value of {@code 0}
+ * disables the cache.
+ */
+ PREPARED_STATEMENT_CACHE_SIZE_MIB(
+ "preparedStatementCacheSizeMiB",
+ "5",
+ "Specifies the maximum size (in megabytes) of a per-connection prepared statement cache. A value of {@code 0} disables the cache."),
+
+ /**
+ * Sets the default threshold for enabling server-side prepare. A value of {@code -1} stands for
+ * forceBinary
+ */
+ PREPARE_THRESHOLD(
+ "prepareThreshold",
+ "5",
+ "Statement prepare threshold. A value of {@code -1} stands for forceBinary"),
+
+ /**
+ * Force use of a particular protocol version when connecting, if set, disables protocol version
+ * fallback.
+ */
+ PROTOCOL_VERSION(
+ "protocolVersion",
+ null,
+ "Force use of a particular protocol version when connecting, currently only version 3 is supported.",
+ false,
+ new String[]{"3"}),
+
+ /**
+ * Quote returning columns.
+ * There are some ORM's that quote everything, including returning columns
+ * If we quote them, then we end up sending ""colname"" to the backend
+ * which will not be found
+ */
+ QUOTE_RETURNING_IDENTIFIERS(
+ "quoteReturningIdentifiers",
+ "true",
+ "Quote identifiers provided in returning array",
+ false),
+ /**
+ * Puts this connection in read-only mode.
+ */
+ READ_ONLY(
+ "readOnly",
+ "false",
+ "Puts this connection in read-only mode"),
+
+ /**
+ * Connection parameter to control behavior when
+ * {@link Connection#setReadOnly(boolean)} is set to {@code true}.
+ */
+ READ_ONLY_MODE(
+ "readOnlyMode",
+ "transaction",
+ "Controls the behavior when a connection is set to be read only, one of 'ignore', 'transaction', or 'always' "
+ + "When 'ignore', setting readOnly has no effect. "
+ + "When 'transaction' setting readOnly to 'true' will cause transactions to BEGIN READ ONLY if autocommit is 'false'. "
+ + "When 'always' setting readOnly to 'true' will set the session to READ ONLY if autoCommit is 'true' "
+ + "and the transaction to BEGIN READ ONLY if autocommit is 'false'.",
+ false,
+ new String[]{"ignore", "transaction", "always"}),
+
+ /**
+ * Socket read buffer size (SO_RECVBUF). A value of {@code -1}, which is the default, means system
+ * default.
+ */
+ RECEIVE_BUFFER_SIZE(
+ "receiveBufferSize",
+ "-1",
+ "Socket read buffer size"),
+
+ /**
+ * <p>
+ * Connection parameter passed in the startup message. This parameter accepts two values; "true"
+ * and "database". Passing "true" tells the backend to go into walsender mode, wherein a small set
+ * of replication commands can be issued instead of SQL statements. Only the simple query protocol
+ * can be used in walsender mode. Passing "database" as the value instructs walsender to connect
+ * to the database specified in the dbname parameter, which will allow the connection to be used
+ * for logical replication from that database.
+ * <p>
+ * Parameter should be used together with {@link PGProperty#ASSUME_MIN_SERVER_VERSION} with
+ * parameter >= 9.4 (backend >= 9.4)
+ */
+ REPLICATION(
+ "replication",
+ null,
+ "Connection parameter passed in startup message, one of 'true' or 'database' "
+ + "Passing 'true' tells the backend to go into walsender mode, "
+ + "wherein a small set of replication commands can be issued instead of SQL statements. "
+ + "Only the simple query protocol can be used in walsender mode. "
+ + "Passing 'database' as the value instructs walsender to connect "
+ + "to the database specified in the dbname parameter, "
+ + "which will allow the connection to be used for logical replication "
+ + "from that database. "
+ + "(backend >= 9.4)"),
+
+ /**
+ * Configure optimization to enable batch insert re-writing.
+ */
+ REWRITE_BATCHED_INSERTS(
+ "reWriteBatchedInserts",
+ "false",
+ "Enable optimization to rewrite and collapse compatible INSERT statements that are batched."),
+
+ /**
+ * Socket write buffer size (SO_SNDBUF). A value of {@code -1}, which is the default, means system
+ * default.
+ */
+ SEND_BUFFER_SIZE(
+ "sendBufferSize",
+ "-1",
+ "Socket write buffer size"),
+
+ /**
+ * Service name to use for additional parameters. It specifies a service name in "pg_service
+ * .conf" that holds additional connection parameters. This allows applications to specify only
+ * a service name so connection parameters can be centrally maintained.
+ */
+ SERVICE(
+ "service",
+ null,
+ "Service name to be searched in pg_service.conf resource"),
+
+ /**
+ * Socket factory used to create socket. A null value, which is the default, means system default.
+ */
+ SOCKET_FACTORY(
+ "socketFactory",
+ null,
+ "Specify a socket factory for socket creation"),
+
+ /**
+ * The String argument to give to the constructor of the Socket Factory.
+ */
+ SOCKET_FACTORY_ARG(
+ "socketFactoryArg",
+ null,
+ "Argument forwarded to constructor of SocketFactory class."),
+
+ /**
+ * The timeout value used for socket read operations. If reading from the server takes longer than
+ * this value, the connection is closed. This can be used as both a brute force global query
+ * timeout and a method of detecting network problems. The timeout is specified in seconds and a
+ * value of zero means that it is disabled.
+ */
+ SOCKET_TIMEOUT(
+ "socketTimeout",
+ "0",
+ "The timeout value in seconds max(2147484) used for socket read operations."),
+
+ /**
+ * Control use of SSL: empty or {@code true} values imply {@code sslmode==verify-full}
+ */
+ SSL(
+ "ssl",
+ null,
+ "Control use of SSL (any non-null value causes SSL to be required)"),
+
+ /**
+ * File containing the SSL Certificate. Default will be the file {@code postgresql.crt} in {@code
+ * $HOME/.postgresql} (*nix) or {@code %APPDATA%\postgresql} (windows).
+ */
+ SSL_CERT(
+ "sslcert",
+ null,
+ "The location of the client's SSL certificate"),
+
+ /**
+ * Classname of the SSL Factory to use (instance of {@link javax.net.ssl.SSLSocketFactory}).
+ */
+ SSL_FACTORY(
+ "sslfactory",
+ "org.postgresql.ssl.LibPQFactory",
+ "Provide a SSLSocketFactory class when using SSL."),
+
+ /**
+ * The String argument to give to the constructor of the SSL Factory.
+ *
+ * @deprecated use {@code ..Factory(Properties)} constructor.
+ */
+ @Deprecated
+ SSL_FACTORY_ARG(
+ "sslfactoryarg",
+ null,
+ "Argument forwarded to constructor of SSLSocketFactory class."),
+
+ /**
+ * Classname of the SSL HostnameVerifier to use (instance of {@link javax.net.ssl.HostnameVerifier}).
+ */
+ SSL_HOSTNAME_VERIFIER(
+ "sslhostnameverifier",
+ null,
+ "A class, implementing javax.net.ssl.HostnameVerifier that can verify the server"),
+
+ /**
+ * File containing the SSL Key. Default will be the file {@code postgresql.pk8} in {@code $HOME/.postgresql} (*nix)
+ * or {@code %APPDATA%\postgresql} (windows).
+ */
+ SSL_KEY(
+ "sslkey",
+ null,
+ "The location of the client's PKCS#8 SSL key"),
+
+ /**
+ * Parameter governing the use of SSL. The allowed values are {@code disable}, {@code allow},
+ * {@code prefer}, {@code require}, {@code verify-ca}, {@code verify-full}.
+ * If {@code ssl} property is empty or set to {@code true} it implies {@code verify-full}.
+ * Default mode is "require"
+ */
+ SSL_MODE(
+ "sslmode",
+ null,
+ "Parameter governing the use of SSL",
+ false,
+ new String[]{"disable", "allow", "prefer", "require", "verify-ca", "verify-full"}),
+
+ /**
+ * The SSL password to use in the default CallbackHandler.
+ */
+ SSL_PASSWORD(
+ "sslpassword",
+ null,
+ "The password for the client's ssl key (ignored if sslpasswordcallback is set)"),
+
+
+ /**
+ * The classname instantiating {@link javax.security.auth.callback.CallbackHandler} to use.
+ */
+ SSL_PASSWORD_CALLBACK(
+ "sslpasswordcallback",
+ null,
+ "A class, implementing javax.security.auth.callback.CallbackHandler that can handle PasswordCallback for the ssl password."),
+
+ /**
+ * <p>
+ * After requesting an upgrade to SSL from the server there are reports of the server not responding due to a failover
+ * without a timeout here, the client can wait forever. This timeout will be set before the request and reset after
+ */
+ SSL_RESPONSE_TIMEOUT(
+ "sslResponseTimeout",
+ "5000",
+ "Time in milliseconds we wait for a response from the server after requesting SSL upgrade"),
+
+ /**
+ * File containing the root certificate when validating server ({@code sslmode} = {@code
+ * verify-ca} or {@code verify-full}). Default will be the file {@code root.crt} in {@code
+ * $HOME/.postgresql} (*nix) or {@code %APPDATA%\postgresql} (windows).
+ */
+ SSL_ROOT_CERT(
+ "sslrootcert",
+ null,
+ "The location of the root certificate for authenticating the server."),
+
+ /**
+ * Specifies the name of the SSPI service class that forms the service class part of the SPN. The
+ * default, {@code POSTGRES}, is almost always correct.
+ */
+ SSPI_SERVICE_CLASS(
+ "sspiServiceClass",
+ "POSTGRES",
+ "The Windows SSPI service class for SPN"),
+
+ /**
+ * Bind String to either {@code unspecified} or {@code varchar}. Default is {@code varchar} for
+ * 8.0+ backends.
+ */
+ STRING_TYPE(
+ "stringtype",
+ null,
+ "The type to bind String parameters as (usually 'varchar', 'unspecified' allows implicit casting to other types)",
+ false,
+ new String[]{"unspecified", "varchar"}),
+
+ TARGET_SERVER_TYPE(
+ "targetServerType",
+ "any",
+ "Specifies what kind of server to connect",
+ false,
+ new String[]{"any", "primary", "master", "slave", "secondary", "preferSlave", "preferSecondary", "preferPrimary"}),
+
+ /**
+ * Enable or disable TCP keep-alive. The default is {@code false}.
+ */
+ TCP_KEEP_ALIVE(
+ "tcpKeepAlive",
+ "false",
+ "Enable or disable TCP keep-alive. The default is {@code false}."),
+
+ TCP_NO_DELAY(
+ "tcpNoDelay",
+ "true",
+ "Enable or disable TCP no delay. The default is {@code true}."
+ ),
+ /**
+ * Specifies the length to return for types of unknown length.
+ */
+ UNKNOWN_LENGTH(
+ "unknownLength",
+ Integer.toString(Integer.MAX_VALUE),
+ "Specifies the length to return for types of unknown length"),
+
+ /**
+ * Username to connect to the database as.
+ */
+ USER(
+ "user",
+ null,
+ "Username to connect to the database as.",
+ true),
+
+ /**
+ * Use SPNEGO in SSPI authentication requests.
+ */
+ USE_SPNEGO(
+ "useSpnego",
+ "false",
+ "Use SPNEGO in SSPI authentication requests"),
+
+ /**
+ * Factory class to instantiate factories for XML processing.
+ * The default factory disables external entity processing.
+ * Legacy behavior with external entity processing can be enabled by specifying a value of LEGACY_INSECURE.
+ * Or specify a custom class that implements {@link org.postgresql.xml.PGXmlFactoryFactory}.
+ */
+ XML_FACTORY_FACTORY(
+ "xmlFactoryFactory",
+ "",
+ "Factory class to instantiate factories for XML processing"),
+
+ ;
+
+ private static final Map<String, PGProperty> PROPS_BY_NAME = new HashMap<>();
+
+ static {
+ for (PGProperty prop : PGProperty.values()) {
+ if (PROPS_BY_NAME.put(prop.getName(), prop) != null) {
+ throw new IllegalStateException("Duplicate PGProperty name: " + prop.getName());
+ }
+ }
}
- }
- /**
- * Returns the name of the connection parameter. The name is the key that must be used in JDBC URL
- * or in Driver properties
- *
- * @return the name of the connection parameter
- */
- public String getName() {
- return name;
- }
+ private final String name;
+ private final String defaultValue;
+ private final boolean required;
+ private final String description;
+ private final String[] choices;
- /**
- * Returns the default value for this connection parameter.
- *
- * @return the default value for this connection parameter or null
- */
- public String getDefaultValue() {
- return defaultValue;
- }
-
- /**
- * Returns whether this parameter is required.
- *
- * @return whether this parameter is required
- */
- public boolean isRequired() {
- return required;
- }
-
- /**
- * Returns the description for this connection parameter.
- *
- * @return the description for this connection parameter
- */
- public String getDescription() {
- return description;
- }
-
- /**
- * Returns the available values for this connection parameter.
- *
- * @return the available values for this connection parameter or null
- */
- public String [] getChoices() {
- return choices;
- }
-
- /**
- * Returns the value of the connection parameter from the given {@link Properties} or the
- * default value.
- *
- * @param properties properties to take actual value from
- * @return evaluated value for this connection parameter
- */
- public String getOrDefault(Properties properties) {
- return properties.getProperty(name, defaultValue);
- }
-
- /**
- * Returns the value of the connection parameter from the given {@link Properties} or the
- * default value
- * @param properties properties to take actual value from
- * @return evaluated value for this connection parameter or null
- * @deprecated use {@link #getOrDefault(Properties)} instead
- */
- @Deprecated
- public String get(Properties properties) {
- return getOrDefault(properties);
- }
-
- /**
- * Returns the value of the connection parameter from the given {@link Properties} or null if there
- * is no default value
- * @param properties properties object to get value from
- * @return evaluated value for this connection parameter
- */
- public String getOrNull(Properties properties) {
- return properties.getProperty(name);
- }
-
- /**
- * Set the value for this connection parameter in the given {@link Properties}.
- *
- * @param properties properties in which the value should be set
- * @param value value for this connection parameter
- */
- public void set(Properties properties, String value) {
- if (value == null) {
- properties.remove(name);
- } else {
- properties.setProperty(name, value);
+ PGProperty(String name, String defaultValue, String description) {
+ this(name, defaultValue, description, false);
}
- }
- /**
- * Return the boolean value for this connection parameter in the given {@link Properties}.
- *
- * @param properties properties to take actual value from
- * @return evaluated value for this connection parameter converted to boolean
- */
- public boolean getBoolean(Properties properties) {
- return Boolean.parseBoolean(getOrDefault(properties));
- }
-
- /**
- * Return the int value for this connection parameter in the given {@link Properties}. Prefer the
- * use of {@link #getInt(Properties)} anywhere you can throw an {@link java.sql.SQLException}.
- *
- * @param properties properties to take actual value from
- * @return evaluated value for this connection parameter converted to int
- * @throws NumberFormatException if it cannot be converted to int.
- */
- @SuppressWarnings("nullness:argument")
- public int getIntNoCheck(Properties properties) {
- String value = getOrDefault(properties);
- return Integer.parseInt(value);
- }
-
- /**
- * Return the int value for this connection parameter in the given {@link Properties}.
- *
- * @param properties properties to take actual value from
- * @return evaluated value for this connection parameter converted to int
- * @throws PSQLException if it cannot be converted to int.
- */
- @SuppressWarnings("nullness:argument")
- public int getInt(Properties properties) throws PSQLException {
- String value = getOrDefault(properties);
- try {
- return Integer.parseInt(value);
- } catch (NumberFormatException nfe) {
- throw new PSQLException(GT.tr("{0} parameter value must be an integer but was: {1}",
- getName(), value), PSQLState.INVALID_PARAMETER_VALUE, nfe);
+ PGProperty(String name, String defaultValue, String description, boolean required) {
+ this(name, defaultValue, description, required, (String[]) null);
}
- }
- /**
- * Return the {@link Integer} value for this connection parameter in the given {@link Properties}.
- *
- * @param properties properties to take actual value from
- * @return evaluated value for this connection parameter converted to Integer or null
- * @throws PSQLException if unable to parse property as integer
- */
- public Integer getInteger(Properties properties) throws PSQLException {
- String value = getOrDefault(properties);
- if (value == null) {
- return null;
+ PGProperty(String name, String defaultValue, String description, boolean required,
+ String[] choices) {
+ this.name = name;
+ this.defaultValue = defaultValue;
+ this.required = required;
+ this.description = description;
+ this.choices = choices;
}
- try {
- return Integer.parseInt(value);
- } catch (NumberFormatException nfe) {
- throw new PSQLException(GT.tr("{0} parameter value must be an integer but was: {1}",
- getName(), value), PSQLState.INVALID_PARAMETER_VALUE, nfe);
+
+ public static PGProperty forName(String name) {
+ return PROPS_BY_NAME.get(name);
}
- }
- /**
- * Set the boolean value for this connection parameter in the given {@link Properties}.
- *
- * @param properties properties in which the value should be set
- * @param value boolean value for this connection parameter
- */
- public void set(Properties properties, boolean value) {
- properties.setProperty(name, Boolean.toString(value));
- }
-
- /**
- * Set the int value for this connection parameter in the given {@link Properties}.
- *
- * @param properties properties in which the value should be set
- * @param value int value for this connection parameter
- */
- public void set(Properties properties, int value) {
- properties.setProperty(name, Integer.toString(value));
- }
-
- /**
- * Test whether this property is present in the given {@link Properties}.
- *
- * @param properties set of properties to check current in
- * @return true if the parameter is specified in the given properties
- */
- public boolean isPresent(Properties properties) {
- return getSetString(properties) != null;
- }
-
- /**
- * Convert this connection parameter and the value read from the given {@link Properties} into a
- * {@link DriverPropertyInfo}.
- *
- * @param properties properties to take actual value from
- * @return a DriverPropertyInfo representing this connection parameter
- */
- public DriverPropertyInfo toDriverPropertyInfo(Properties properties) {
- DriverPropertyInfo propertyInfo = new DriverPropertyInfo(name, getOrDefault(properties));
- propertyInfo.required = required;
- propertyInfo.description = description;
- propertyInfo.choices = choices;
- return propertyInfo;
- }
-
- public static PGProperty forName(String name) {
- return PROPS_BY_NAME.get(name);
- }
-
- /**
- * Return the property if exists but avoiding the default. Allowing the caller to detect the lack
- * of a property.
- *
- * @param properties properties bundle
- * @return the value of a set property
- */
- public String getSetString(Properties properties) {
- Object o = properties.get(name);
- if (o instanceof String) {
- return (String) o;
+ /**
+ * Returns the name of the connection parameter. The name is the key that must be used in JDBC URL
+ * or in Driver properties
+ *
+ * @return the name of the connection parameter
+ */
+ public String getName() {
+ return name;
+ }
+
+ /**
+ * Returns the default value for this connection parameter.
+ *
+ * @return the default value for this connection parameter or null
+ */
+ public String getDefaultValue() {
+ return defaultValue;
+ }
+
+ /**
+ * Returns whether this parameter is required.
+ *
+ * @return whether this parameter is required
+ */
+ public boolean isRequired() {
+ return required;
+ }
+
+ /**
+ * Returns the description for this connection parameter.
+ *
+ * @return the description for this connection parameter
+ */
+ public String getDescription() {
+ return description;
+ }
+
+ /**
+ * Returns the available values for this connection parameter.
+ *
+ * @return the available values for this connection parameter or null
+ */
+ public String[] getChoices() {
+ return choices;
+ }
+
+ /**
+ * Returns the value of the connection parameter from the given {@link Properties} or the
+ * default value.
+ *
+ * @param properties properties to take actual value from
+ * @return evaluated value for this connection parameter
+ */
+ public String getOrDefault(Properties properties) {
+ return properties.getProperty(name, defaultValue);
+ }
+
+ /**
+ * Returns the value of the connection parameter from the given {@link Properties} or the
+ * default value
+ *
+ * @param properties properties to take actual value from
+ * @return evaluated value for this connection parameter or null
+ * @deprecated use {@link #getOrDefault(Properties)} instead
+ */
+ @Deprecated
+ public String get(Properties properties) {
+ return getOrDefault(properties);
+ }
+
+ /**
+ * Returns the value of the connection parameter from the given {@link Properties} or null if there
+ * is no default value
+ *
+ * @param properties properties object to get value from
+ * @return evaluated value for this connection parameter
+ */
+ public String getOrNull(Properties properties) {
+ return properties.getProperty(name);
+ }
+
+ /**
+ * Set the value for this connection parameter in the given {@link Properties}.
+ *
+ * @param properties properties in which the value should be set
+ * @param value value for this connection parameter
+ */
+ public void set(Properties properties, String value) {
+ if (value == null) {
+ properties.remove(name);
+ } else {
+ properties.setProperty(name, value);
+ }
+ }
+
+ /**
+ * Return the boolean value for this connection parameter in the given {@link Properties}.
+ *
+ * @param properties properties to take actual value from
+ * @return evaluated value for this connection parameter converted to boolean
+ */
+ public boolean getBoolean(Properties properties) {
+ return Boolean.parseBoolean(getOrDefault(properties));
+ }
+
+ /**
+ * Return the int value for this connection parameter in the given {@link Properties}. Prefer the
+ * use of {@link #getInt(Properties)} anywhere you can throw an {@link java.sql.SQLException}.
+ *
+ * @param properties properties to take actual value from
+ * @return evaluated value for this connection parameter converted to int
+ * @throws NumberFormatException if it cannot be converted to int.
+ */
+ @SuppressWarnings("nullness:argument")
+ public int getIntNoCheck(Properties properties) {
+ String value = getOrDefault(properties);
+ return Integer.parseInt(value);
+ }
+
+ /**
+ * Return the int value for this connection parameter in the given {@link Properties}.
+ *
+ * @param properties properties to take actual value from
+ * @return evaluated value for this connection parameter converted to int
+ * @throws PSQLException if it cannot be converted to int.
+ */
+ @SuppressWarnings("nullness:argument")
+ public int getInt(Properties properties) throws PSQLException {
+ String value = getOrDefault(properties);
+ try {
+ return Integer.parseInt(value);
+ } catch (NumberFormatException nfe) {
+ throw new PSQLException(GT.tr("{0} parameter value must be an integer but was: {1}",
+ getName(), value), PSQLState.INVALID_PARAMETER_VALUE, nfe);
+ }
+ }
+
+ /**
+ * Return the {@link Integer} value for this connection parameter in the given {@link Properties}.
+ *
+ * @param properties properties to take actual value from
+ * @return evaluated value for this connection parameter converted to Integer or null
+ * @throws PSQLException if unable to parse property as integer
+ */
+ public Integer getInteger(Properties properties) throws PSQLException {
+ String value = getOrDefault(properties);
+ if (value == null) {
+ return null;
+ }
+ try {
+ return Integer.parseInt(value);
+ } catch (NumberFormatException nfe) {
+ throw new PSQLException(GT.tr("{0} parameter value must be an integer but was: {1}",
+ getName(), value), PSQLState.INVALID_PARAMETER_VALUE, nfe);
+ }
+ }
+
+ /**
+ * Set the boolean value for this connection parameter in the given {@link Properties}.
+ *
+ * @param properties properties in which the value should be set
+ * @param value boolean value for this connection parameter
+ */
+ public void set(Properties properties, boolean value) {
+ properties.setProperty(name, Boolean.toString(value));
+ }
+
+ /**
+ * Set the int value for this connection parameter in the given {@link Properties}.
+ *
+ * @param properties properties in which the value should be set
+ * @param value int value for this connection parameter
+ */
+ public void set(Properties properties, int value) {
+ properties.setProperty(name, Integer.toString(value));
+ }
+
+ /**
+ * Test whether this property is present in the given {@link Properties}.
+ *
+ * @param properties set of properties to check the parameter's presence in
+ * @return true if the parameter is specified in the given properties
+ */
+ public boolean isPresent(Properties properties) {
+ return getSetString(properties) != null;
+ }
+
+ /**
+ * Convert this connection parameter and the value read from the given {@link Properties} into a
+ * {@link DriverPropertyInfo}.
+ *
+ * @param properties properties to take actual value from
+ * @return a DriverPropertyInfo representing this connection parameter
+ */
+ public DriverPropertyInfo toDriverPropertyInfo(Properties properties) {
+ DriverPropertyInfo propertyInfo = new DriverPropertyInfo(name, getOrDefault(properties));
+ propertyInfo.required = required;
+ propertyInfo.description = description;
+ propertyInfo.choices = choices;
+ return propertyInfo;
+ }
+
+ /**
+ * Return the property if exists but avoiding the default. Allowing the caller to detect the lack
+ * of a property.
+ *
+ * @param properties properties bundle
+ * @return the value of a set property
+ */
+ public String getSetString(Properties properties) {
+ Object o = properties.get(name);
+ if (o instanceof String) {
+ return (String) o;
+ }
+ return null;
}
- return null;
- }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/PGRefCursorResultSet.java b/pgjdbc/src/main/java/org/postgresql/PGRefCursorResultSet.java
index 8fc678b..e1692a2 100644
--- a/pgjdbc/src/main/java/org/postgresql/PGRefCursorResultSet.java
+++ b/pgjdbc/src/main/java/org/postgresql/PGRefCursorResultSet.java
@@ -9,17 +9,17 @@ package org.postgresql;
* A ref cursor based result set.
*
* @deprecated As of 8.0, this interface is only present for backwards- compatibility purposes. New
- * code should call getString() on the ResultSet that contains the refcursor to obtain
- * the underlying cursor name.
+ * code should call getString() on the ResultSet that contains the refcursor to obtain
+ * the underlying cursor name.
*/
@Deprecated
public interface PGRefCursorResultSet {
- /**
- * @return the name of the cursor.
- * @deprecated As of 8.0, replaced with calling getString() on the ResultSet that this ResultSet
- * was obtained from.
- */
- @Deprecated
- String getRefCursor();
+ /**
+ * @return the name of the cursor.
+ * @deprecated As of 8.0, replaced with calling getString() on the ResultSet that this ResultSet
+ * was obtained from.
+ */
+ @Deprecated
+ String getRefCursor();
}
diff --git a/pgjdbc/src/main/java/org/postgresql/PGResultSetMetaData.java b/pgjdbc/src/main/java/org/postgresql/PGResultSetMetaData.java
index b0575cc..bd51047 100644
--- a/pgjdbc/src/main/java/org/postgresql/PGResultSetMetaData.java
+++ b/pgjdbc/src/main/java/org/postgresql/PGResultSetMetaData.java
@@ -11,45 +11,45 @@ import java.sql.SQLException;
public interface PGResultSetMetaData {
- /**
- * Returns the underlying column name of a query result, or "" if it is unable to be determined.
- *
- * @param column column position (1-based)
- * @return underlying column name of a query result
- * @throws SQLException if something wrong happens
- * @since 8.0
- */
- String getBaseColumnName(int column) throws SQLException;
+ /**
+ * Returns the underlying column name of a query result, or "" if it is unable to be determined.
+ *
+ * @param column column position (1-based)
+ * @return underlying column name of a query result
+ * @throws SQLException if something wrong happens
+ * @since 8.0
+ */
+ String getBaseColumnName(int column) throws SQLException;
- /**
- * Returns the underlying table name of query result, or "" if it is unable to be determined.
- *
- * @param column column position (1-based)
- * @return underlying table name of query result
- * @throws SQLException if something wrong happens
- * @since 8.0
- */
- String getBaseTableName(int column) throws SQLException;
+ /**
+ * Returns the underlying table name of query result, or "" if it is unable to be determined.
+ *
+ * @param column column position (1-based)
+ * @return underlying table name of query result
+ * @throws SQLException if something wrong happens
+ * @since 8.0
+ */
+ String getBaseTableName(int column) throws SQLException;
- /**
- * Returns the underlying schema name of query result, or "" if it is unable to be determined.
- *
- * @param column column position (1-based)
- * @return underlying schema name of query result
- * @throws SQLException if something wrong happens
- * @since 8.0
- */
- String getBaseSchemaName(int column) throws SQLException;
+ /**
+ * Returns the underlying schema name of query result, or "" if it is unable to be determined.
+ *
+ * @param column column position (1-based)
+ * @return underlying schema name of query result
+ * @throws SQLException if something wrong happens
+ * @since 8.0
+ */
+ String getBaseSchemaName(int column) throws SQLException;
- /**
- * Is a column Text or Binary?
- *
- * @param column column position (1-based)
- * @return 0 if column data format is TEXT, or 1 if BINARY
- * @throws SQLException if something wrong happens
- * @see Field#BINARY_FORMAT
- * @see Field#TEXT_FORMAT
- * @since 9.4
- */
- int getFormat(int column) throws SQLException;
+ /**
+ * Is a column Text or Binary?
+ *
+ * @param column column position (1-based)
+ * @return 0 if column data format is TEXT, or 1 if BINARY
+ * @throws SQLException if something wrong happens
+ * @see Field#BINARY_FORMAT
+ * @see Field#TEXT_FORMAT
+ * @since 9.4
+ */
+ int getFormat(int column) throws SQLException;
}
diff --git a/pgjdbc/src/main/java/org/postgresql/PGStatement.java b/pgjdbc/src/main/java/org/postgresql/PGStatement.java
index 8a79ba9..521125a 100644
--- a/pgjdbc/src/main/java/org/postgresql/PGStatement.java
+++ b/pgjdbc/src/main/java/org/postgresql/PGStatement.java
@@ -12,86 +12,86 @@ import java.sql.SQLException;
* constructed by the PostgreSQL driver implement PGStatement.
*/
public interface PGStatement {
- // We can't use Long.MAX_VALUE or Long.MIN_VALUE for java.sql.date
- // because this would break the 'normalization contract' of the
- // java.sql.Date API.
- // The follow values are the nearest MAX/MIN values with hour,
- // minute, second, millisecond set to 0 - this is used for
- // -infinity / infinity representation in Java
- long DATE_POSITIVE_INFINITY = 9223372036825200000L;
- long DATE_NEGATIVE_INFINITY = -9223372036832400000L;
- long DATE_POSITIVE_SMALLER_INFINITY = 185543533774800000L;
- long DATE_NEGATIVE_SMALLER_INFINITY = -185543533774800000L;
+ // We can't use Long.MAX_VALUE or Long.MIN_VALUE for java.sql.date
+ // because this would break the 'normalization contract' of the
+ // java.sql.Date API.
+ // The following values are the nearest MAX/MIN values with hour,
+ // minute, second, millisecond set to 0 - this is used for
+ // -infinity / infinity representation in Java
+ long DATE_POSITIVE_INFINITY = 9223372036825200000L;
+ long DATE_NEGATIVE_INFINITY = -9223372036832400000L;
+ long DATE_POSITIVE_SMALLER_INFINITY = 185543533774800000L;
+ long DATE_NEGATIVE_SMALLER_INFINITY = -185543533774800000L;
- /**
- * Returns the Last inserted/updated oid.
- *
- * @return OID of last insert
- * @throws SQLException if something goes wrong
- * @since 7.3
- */
- long getLastOID() throws SQLException;
+ /**
+ * Returns the Last inserted/updated oid.
+ *
+ * @return OID of last insert
+ * @throws SQLException if something goes wrong
+ * @since 7.3
+ */
+ long getLastOID() throws SQLException;
- /**
- * Turn on the use of prepared statements in the server (server side prepared statements are
- * unrelated to jdbc PreparedStatements) As of build 302, this method is equivalent to
- * setPrepareThreshold(1).
- *
- * @param flag use server prepare
- * @throws SQLException if something goes wrong
- * @since 7.3
- * @deprecated As of build 302, replaced by {@link #setPrepareThreshold(int)}
- */
- @Deprecated
- void setUseServerPrepare(boolean flag) throws SQLException;
+ /**
+ * Checks if this statement will be executed as a server-prepared statement. A return value of
+ * true indicates that the next execution of the statement will be done as a
+ * server-prepared statement, assuming the underlying protocol supports it.
+ *
+ * @return true if the next reuse of this statement will use a server-prepared statement
+ */
+ boolean isUseServerPrepare();
- /**
- * Checks if this statement will be executed as a server-prepared statement. A return value of
- * true indicates that the next execution of the statement will be done as a
- * server-prepared statement, assuming the underlying protocol supports it.
- *
- * @return true if the next reuse of this statement will use a server-prepared statement
- */
- boolean isUseServerPrepare();
+ /**
+ * Turn on the use of prepared statements in the server (server side prepared statements are
+ * unrelated to jdbc PreparedStatements) As of build 302, this method is equivalent to
+ * setPrepareThreshold(1).
+ *
+ * @param flag use server prepare
+ * @throws SQLException if something goes wrong
+ * @since 7.3
+ * @deprecated As of build 302, replaced by {@link #setPrepareThreshold(int)}
+ */
+ @Deprecated
+ void setUseServerPrepare(boolean flag) throws SQLException;
- /**
- * <p>Sets the reuse threshold for using server-prepared statements.
- *
- * <p>If threshold is a non-zero value N, the Nth and subsequent reuses of a
- * PreparedStatement will use server-side prepare.
- *
- * <p>If threshold is zero, server-side prepare will not be used.
- *
- * <p>The reuse threshold is only used by PreparedStatement and CallableStatement objects; it is
- * ignored for plain Statements.
- *
- * @param threshold the new threshold for this statement
- * @throws SQLException if an exception occurs while changing the threshold
- * @since build 302
- */
- void setPrepareThreshold(int threshold) throws SQLException;
+ /**
+ * Gets the server-side prepare reuse threshold in use for this statement.
+ *
+ * @return the current threshold
+ * @see #setPrepareThreshold(int)
+ * @since build 302
+ */
+ int getPrepareThreshold();
- /**
- * Gets the server-side prepare reuse threshold in use for this statement.
- *
- * @return the current threshold
- * @see #setPrepareThreshold(int)
- * @since build 302
- */
- int getPrepareThreshold();
+ /**
+ * <p>Sets the reuse threshold for using server-prepared statements.
+ *
+ * <p>If threshold is a non-zero value N, the Nth and subsequent reuses of a
+ * PreparedStatement will use server-side prepare.
+ *
+ * <p>If threshold is zero, server-side prepare will not be used.
+ *
+ * <p>The reuse threshold is only used by PreparedStatement and CallableStatement objects; it is
+ * ignored for plain Statements.
+ *
+ * @param threshold the new threshold for this statement
+ * @throws SQLException if an exception occurs while changing the threshold
+ * @since build 302
+ */
+ void setPrepareThreshold(int threshold) throws SQLException;
- /**
- * Turn on/off adaptive fetch for statement. Existing resultSets won't be affected by change
- * here.
- *
- * @param adaptiveFetch desired state of adaptive fetch.
- */
- void setAdaptiveFetch(boolean adaptiveFetch);
+ /**
+ * Get state of adaptive fetch for statement.
+ *
+ * @return state of adaptive fetch (turned on or off)
+ */
+ boolean getAdaptiveFetch();
- /**
- * Get state of adaptive fetch for statement.
- *
- * @return state of adaptive fetch (turned on or off)
- */
- boolean getAdaptiveFetch();
+ /**
+ * Turn on/off adaptive fetch for statement. Existing resultSets won't be affected by change
+ * here.
+ *
+ * @param adaptiveFetch desired state of adaptive fetch.
+ */
+ void setAdaptiveFetch(boolean adaptiveFetch);
}
diff --git a/pgjdbc/src/main/java/org/postgresql/copy/CopyIn.java b/pgjdbc/src/main/java/org/postgresql/copy/CopyIn.java
index b0cd5b4..bd17831 100644
--- a/pgjdbc/src/main/java/org/postgresql/copy/CopyIn.java
+++ b/pgjdbc/src/main/java/org/postgresql/copy/CopyIn.java
@@ -5,48 +5,47 @@
package org.postgresql.copy;
-import org.postgresql.util.ByteStreamWriter;
-
import java.sql.SQLException;
+import org.postgresql.util.ByteStreamWriter;
/**
* Copy bulk data from client into a PostgreSQL table very fast.
*/
public interface CopyIn extends CopyOperation {
- /**
- * Writes specified part of given byte array to an open and writable copy operation.
- *
- * @param buf array of bytes to write
- * @param off offset of first byte to write (normally zero)
- * @param siz number of bytes to write (normally buf.length)
- * @throws SQLException if the operation fails
- */
- void writeToCopy(byte[] buf, int off, int siz) throws SQLException;
+ /**
+ * Writes specified part of given byte array to an open and writable copy operation.
+ *
+ * @param buf array of bytes to write
+ * @param off offset of first byte to write (normally zero)
+ * @param siz number of bytes to write (normally buf.length)
+ * @throws SQLException if the operation fails
+ */
+ void writeToCopy(byte[] buf, int off, int siz) throws SQLException;
- /**
- * Writes a ByteStreamWriter to an open and writable copy operation.
- *
- * @param from the source of bytes, e.g. a ByteBufferByteStreamWriter
- * @throws SQLException if the operation fails
- */
- void writeToCopy(ByteStreamWriter from) throws SQLException;
+ /**
+ * Writes a ByteStreamWriter to an open and writable copy operation.
+ *
+ * @param from the source of bytes, e.g. a ByteBufferByteStreamWriter
+ * @throws SQLException if the operation fails
+ */
+ void writeToCopy(ByteStreamWriter from) throws SQLException;
- /**
- * Force any buffered output to be sent over the network to the backend. In general this is a
- * useless operation as it will get pushed over in due time or when endCopy is called. Some
- * specific modified server versions (Truviso) want this data sooner. If you are unsure if you
- * need to use this method, don't.
- *
- * @throws SQLException if the operation fails.
- */
- void flushCopy() throws SQLException;
+ /**
+ * Force any buffered output to be sent over the network to the backend. In general this is a
+ * useless operation as it will get pushed over in due time or when endCopy is called. Some
+ * specific modified server versions (Truviso) want this data sooner. If you are unsure if you
+ * need to use this method, don't.
+ *
+ * @throws SQLException if the operation fails.
+ */
+ void flushCopy() throws SQLException;
- /**
- * Finishes copy operation successfully.
- *
- * @return number of updated rows for server 8.2 or newer (see getHandledRowCount())
- * @throws SQLException if the operation fails.
- */
- long endCopy() throws SQLException;
+ /**
+ * Finishes copy operation successfully.
+ *
+ * @return number of updated rows for server 8.2 or newer (see getHandledRowCount())
+ * @throws SQLException if the operation fails.
+ */
+ long endCopy() throws SQLException;
}
diff --git a/pgjdbc/src/main/java/org/postgresql/copy/CopyManager.java b/pgjdbc/src/main/java/org/postgresql/copy/CopyManager.java
index 8849f19..ea1a4d7 100644
--- a/pgjdbc/src/main/java/org/postgresql/copy/CopyManager.java
+++ b/pgjdbc/src/main/java/org/postgresql/copy/CopyManager.java
@@ -5,6 +5,12 @@
package org.postgresql.copy;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.Reader;
+import java.io.Writer;
+import java.sql.SQLException;
import org.postgresql.core.BaseConnection;
import org.postgresql.core.Encoding;
import org.postgresql.core.QueryExecutor;
@@ -13,244 +19,237 @@ import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.io.Reader;
-import java.io.Writer;
-import java.sql.SQLException;
-
/**
* API for PostgreSQL COPY bulk data transfer.
*/
public class CopyManager {
- // I don't know what the best buffer size is, so we let people specify it if
- // they want, and if they don't know, we don't make them guess, so that if we
- // do figure it out we can just set it here and they reap the rewards.
- // Note that this is currently being used for both a number of bytes and a number
- // of characters.
- static final int DEFAULT_BUFFER_SIZE = 65536;
+ // I don't know what the best buffer size is, so we let people specify it if
+ // they want, and if they don't know, we don't make them guess, so that if we
+ // do figure it out we can just set it here and they reap the rewards.
+ // Note that this is currently being used for both a number of bytes and a number
+ // of characters.
+ static final int DEFAULT_BUFFER_SIZE = 65536;
- private final Encoding encoding;
- private final QueryExecutor queryExecutor;
- private final BaseConnection connection;
+ private final Encoding encoding;
+ private final QueryExecutor queryExecutor;
+ private final BaseConnection connection;
- public CopyManager(BaseConnection connection) throws SQLException {
- this.encoding = connection.getEncoding();
- this.queryExecutor = connection.getQueryExecutor();
- this.connection = connection;
- }
-
- public CopyIn copyIn(String sql) throws SQLException {
- CopyOperation op = queryExecutor.startCopy(sql, connection.getAutoCommit());
- if (op == null || op instanceof CopyIn) {
- return (CopyIn) op;
- } else {
- op.cancelCopy();
- throw new PSQLException(GT.tr("Requested CopyIn but got {0}", op.getClass().getName()),
- PSQLState.WRONG_OBJECT_TYPE);
+ public CopyManager(BaseConnection connection) throws SQLException {
+ this.encoding = connection.getEncoding();
+ this.queryExecutor = connection.getQueryExecutor();
+ this.connection = connection;
}
- }
- public CopyOut copyOut(String sql) throws SQLException {
- CopyOperation op = queryExecutor.startCopy(sql, connection.getAutoCommit());
- if (op == null || op instanceof CopyOut) {
- return (CopyOut) op;
- } else {
- op.cancelCopy();
- throw new PSQLException(GT.tr("Requested CopyOut but got {0}", op.getClass().getName()),
- PSQLState.WRONG_OBJECT_TYPE);
- }
- }
-
- public CopyDual copyDual(String sql) throws SQLException {
- CopyOperation op = queryExecutor.startCopy(sql, connection.getAutoCommit());
- if (op == null || op instanceof CopyDual) {
- return (CopyDual) op;
- } else {
- op.cancelCopy();
- throw new PSQLException(GT.tr("Requested CopyDual but got {0}", op.getClass().getName()),
- PSQLState.WRONG_OBJECT_TYPE);
- }
- }
-
- /**
- * Pass results of a COPY TO STDOUT query from database into a Writer.
- *
- * @param sql COPY TO STDOUT statement
- * @param to the Writer to write the results to (row by row).
- * The Writer is not closed at the end of the Copy Out operation.
- * @return number of rows updated for server 8.2 or newer; -1 for older
- * @throws SQLException on database usage errors
- * @throws IOException upon writer or database connection failure
- */
- public long copyOut(final String sql, Writer to) throws SQLException, IOException {
- byte[] buf;
- CopyOut cp = copyOut(sql);
- try {
- while ((buf = cp.readFromCopy()) != null) {
- to.write(encoding.decode(buf));
- }
- return cp.getHandledRowCount();
- } catch (IOException ioEX) {
- // if not handled this way the close call will hang, at least in 8.2
- if (cp.isActive()) {
- cp.cancelCopy();
- }
- try { // read until exhausted or operation cancelled SQLException
- while ((buf = cp.readFromCopy()) != null) {
+ public CopyIn copyIn(String sql) throws SQLException {
+ CopyOperation op = queryExecutor.startCopy(sql, connection.getAutoCommit());
+ if (op == null || op instanceof CopyIn) {
+ return (CopyIn) op;
+ } else {
+ op.cancelCopy();
+ throw new PSQLException(GT.tr("Requested CopyIn but got {0}", op.getClass().getName()),
+ PSQLState.WRONG_OBJECT_TYPE);
}
- } catch (SQLException sqlEx) {
- } // typically after several kB
- throw ioEX;
- } finally { // see to it that we do not leave the connection locked
- if (cp.isActive()) {
- cp.cancelCopy();
- }
}
- }
- /**
- * Pass results of a COPY TO STDOUT query from database into an OutputStream.
- *
- * @param sql COPY TO STDOUT statement
- * @param to the stream to write the results to (row by row)
- * The stream is not closed at the end of the operation. This is intentional so the
- * caller can continue to write to the output stream
- * @return number of rows updated for server 8.2 or newer; -1 for older
- * @throws SQLException on database usage errors
- * @throws IOException upon output stream or database connection failure
- */
- public long copyOut(final String sql, OutputStream to) throws SQLException, IOException {
- byte[] buf;
- CopyOut cp = copyOut(sql);
- try {
- while ((buf = cp.readFromCopy()) != null) {
- to.write(buf);
- }
- return cp.getHandledRowCount();
- } catch (IOException ioEX) {
- // if not handled this way the close call will hang, at least in 8.2
- if (cp.isActive()) {
- cp.cancelCopy();
- }
- try { // read until exhausted or operation cancelled SQLException
- while ((buf = cp.readFromCopy()) != null) {
+ public CopyOut copyOut(String sql) throws SQLException {
+ CopyOperation op = queryExecutor.startCopy(sql, connection.getAutoCommit());
+ if (op == null || op instanceof CopyOut) {
+ return (CopyOut) op;
+ } else {
+ op.cancelCopy();
+ throw new PSQLException(GT.tr("Requested CopyOut but got {0}", op.getClass().getName()),
+ PSQLState.WRONG_OBJECT_TYPE);
}
- } catch (SQLException sqlEx) {
- } // typically after several kB
- throw ioEX;
- } finally { // see to it that we do not leave the connection locked
- if (cp.isActive()) {
- cp.cancelCopy();
- }
}
- }
- /**
- * Use COPY FROM STDIN for very fast copying from a Reader into a database table.
- *
- * @param sql COPY FROM STDIN statement
- * @param from a CSV file or such
- * @return number of rows updated for server 8.2 or newer; -1 for older
- * @throws SQLException on database usage issues
- * @throws IOException upon reader or database connection failure
- */
- public long copyIn(final String sql, Reader from) throws SQLException, IOException {
- return copyIn(sql, from, DEFAULT_BUFFER_SIZE);
- }
-
- /**
- * Use COPY FROM STDIN for very fast copying from a Reader into a database table.
- *
- * @param sql COPY FROM STDIN statement
- * @param from a CSV file or such
- * @param bufferSize number of characters to buffer and push over network to server at once
- * @return number of rows updated for server 8.2 or newer; -1 for older
- * @throws SQLException on database usage issues
- * @throws IOException upon reader or database connection failure
- */
- public long copyIn(final String sql, Reader from, int bufferSize)
- throws SQLException, IOException {
- char[] cbuf = new char[bufferSize];
- int len;
- CopyIn cp = copyIn(sql);
- try {
- while ((len = from.read(cbuf)) >= 0) {
- if (len > 0) {
- byte[] buf = encoding.encode(new String(cbuf, 0, len));
- cp.writeToCopy(buf, 0, buf.length);
+ public CopyDual copyDual(String sql) throws SQLException {
+ CopyOperation op = queryExecutor.startCopy(sql, connection.getAutoCommit());
+ if (op == null || op instanceof CopyDual) {
+ return (CopyDual) op;
+ } else {
+ op.cancelCopy();
+ throw new PSQLException(GT.tr("Requested CopyDual but got {0}", op.getClass().getName()),
+ PSQLState.WRONG_OBJECT_TYPE);
}
- }
- return cp.endCopy();
- } finally { // see to it that we do not leave the connection locked
- if (cp.isActive()) {
- cp.cancelCopy();
- }
}
- }
- /**
- * Use COPY FROM STDIN for very fast copying from an InputStream into a database table.
- *
- * @param sql COPY FROM STDIN statement
- * @param from a CSV file or such
- * @return number of rows updated for server 8.2 or newer; -1 for older
- * @throws SQLException on database usage issues
- * @throws IOException upon input stream or database connection failure
- */
- public long copyIn(final String sql, InputStream from) throws SQLException, IOException {
- return copyIn(sql, from, DEFAULT_BUFFER_SIZE);
- }
-
- /**
- * Use COPY FROM STDIN for very fast copying from an InputStream into a database table.
- *
- * @param sql COPY FROM STDIN statement
- * @param from a CSV file or such
- * @param bufferSize number of bytes to buffer and push over network to server at once
- * @return number of rows updated for server 8.2 or newer; -1 for older
- * @throws SQLException on database usage issues
- * @throws IOException upon input stream or database connection failure
- */
- public long copyIn(final String sql, InputStream from, int bufferSize)
- throws SQLException, IOException {
- byte[] buf = new byte[bufferSize];
- int len;
- CopyIn cp = copyIn(sql);
- try {
- while ((len = from.read(buf)) >= 0) {
- if (len > 0) {
- cp.writeToCopy(buf, 0, len);
+ /**
+ * Pass results of a COPY TO STDOUT query from database into a Writer.
+ *
+ * @param sql COPY TO STDOUT statement
+ * @param to the Writer to write the results to (row by row).
+ * The Writer is not closed at the end of the Copy Out operation.
+ * @return number of rows updated for server 8.2 or newer; -1 for older
+ * @throws SQLException on database usage errors
+ * @throws IOException upon writer or database connection failure
+ */
+ public long copyOut(final String sql, Writer to) throws SQLException, IOException {
+ byte[] buf;
+ CopyOut cp = copyOut(sql);
+ try {
+ while ((buf = cp.readFromCopy()) != null) {
+ to.write(encoding.decode(buf));
+ }
+ return cp.getHandledRowCount();
+ } catch (IOException ioEX) {
+ // if not handled this way the close call will hang, at least in 8.2
+ if (cp.isActive()) {
+ cp.cancelCopy();
+ }
+ try { // read until exhausted or operation cancelled SQLException
+ while ((buf = cp.readFromCopy()) != null) {
+ }
+ } catch (SQLException sqlEx) {
+ } // typically after several kB
+ throw ioEX;
+ } finally { // see to it that we do not leave the connection locked
+ if (cp.isActive()) {
+ cp.cancelCopy();
+ }
}
- }
- return cp.endCopy();
- } finally { // see to it that we do not leave the connection locked
- if (cp.isActive()) {
- cp.cancelCopy();
- }
}
- }
- /**
- * Use COPY FROM STDIN for very fast copying from an ByteStreamWriter into a database table.
- *
- * @param sql COPY FROM STDIN statement
- * @param from the source of bytes, e.g. a ByteBufferByteStreamWriter
- * @return number of rows updated for server 8.2 or newer; -1 for older
- * @throws SQLException on database usage issues
- * @throws IOException upon input stream or database connection failure
- */
- public long copyIn(String sql, ByteStreamWriter from)
- throws SQLException, IOException {
- CopyIn cp = copyIn(sql);
- try {
- cp.writeToCopy(from);
- return cp.endCopy();
- } finally { // see to it that we do not leave the connection locked
- if (cp.isActive()) {
- cp.cancelCopy();
- }
+ /**
+ * Pass results of a COPY TO STDOUT query from database into an OutputStream.
+ *
+ * @param sql COPY TO STDOUT statement
+ * @param to the stream to write the results to (row by row)
+ * The stream is not closed at the end of the operation. This is intentional so the
+ * caller can continue to write to the output stream
+ * @return number of rows updated for server 8.2 or newer; -1 for older
+ * @throws SQLException on database usage errors
+ * @throws IOException upon output stream or database connection failure
+ */
+ public long copyOut(final String sql, OutputStream to) throws SQLException, IOException {
+ byte[] buf;
+ CopyOut cp = copyOut(sql);
+ try {
+ while ((buf = cp.readFromCopy()) != null) {
+ to.write(buf);
+ }
+ return cp.getHandledRowCount();
+ } catch (IOException ioEX) {
+ // if not handled this way the close call will hang, at least in 8.2
+ if (cp.isActive()) {
+ cp.cancelCopy();
+ }
+ try { // read until exhausted or operation cancelled SQLException
+ while ((buf = cp.readFromCopy()) != null) {
+ }
+ } catch (SQLException sqlEx) {
+ } // typically after several kB
+ throw ioEX;
+ } finally { // see to it that we do not leave the connection locked
+ if (cp.isActive()) {
+ cp.cancelCopy();
+ }
+ }
+ }
+
+ /**
+ * Use COPY FROM STDIN for very fast copying from a Reader into a database table.
+ *
+ * @param sql COPY FROM STDIN statement
+ * @param from a CSV file or such
+ * @return number of rows updated for server 8.2 or newer; -1 for older
+ * @throws SQLException on database usage issues
+ * @throws IOException upon reader or database connection failure
+ */
+ public long copyIn(final String sql, Reader from) throws SQLException, IOException {
+ return copyIn(sql, from, DEFAULT_BUFFER_SIZE);
+ }
+
+ /**
+ * Use COPY FROM STDIN for very fast copying from a Reader into a database table.
+ *
+ * @param sql COPY FROM STDIN statement
+ * @param from a CSV file or such
+ * @param bufferSize number of characters to buffer and push over network to server at once
+ * @return number of rows updated for server 8.2 or newer; -1 for older
+ * @throws SQLException on database usage issues
+ * @throws IOException upon reader or database connection failure
+ */
+ public long copyIn(final String sql, Reader from, int bufferSize)
+ throws SQLException, IOException {
+ char[] cbuf = new char[bufferSize];
+ int len;
+ CopyIn cp = copyIn(sql);
+ try {
+ while ((len = from.read(cbuf)) >= 0) {
+ if (len > 0) {
+ byte[] buf = encoding.encode(new String(cbuf, 0, len));
+ cp.writeToCopy(buf, 0, buf.length);
+ }
+ }
+ return cp.endCopy();
+ } finally { // see to it that we do not leave the connection locked
+ if (cp.isActive()) {
+ cp.cancelCopy();
+ }
+ }
+ }
+
+ /**
+ * Use COPY FROM STDIN for very fast copying from an InputStream into a database table.
+ *
+ * @param sql COPY FROM STDIN statement
+ * @param from a CSV file or such
+ * @return number of rows updated for server 8.2 or newer; -1 for older
+ * @throws SQLException on database usage issues
+ * @throws IOException upon input stream or database connection failure
+ */
+ public long copyIn(final String sql, InputStream from) throws SQLException, IOException {
+ return copyIn(sql, from, DEFAULT_BUFFER_SIZE);
+ }
+
+ /**
+ * Use COPY FROM STDIN for very fast copying from an InputStream into a database table.
+ *
+ * @param sql COPY FROM STDIN statement
+ * @param from a CSV file or such
+ * @param bufferSize number of bytes to buffer and push over network to server at once
+ * @return number of rows updated for server 8.2 or newer; -1 for older
+ * @throws SQLException on database usage issues
+ * @throws IOException upon input stream or database connection failure
+ */
+ public long copyIn(final String sql, InputStream from, int bufferSize)
+ throws SQLException, IOException {
+ byte[] buf = new byte[bufferSize];
+ int len;
+ CopyIn cp = copyIn(sql);
+ try {
+ while ((len = from.read(buf)) >= 0) {
+ if (len > 0) {
+ cp.writeToCopy(buf, 0, len);
+ }
+ }
+ return cp.endCopy();
+ } finally { // see to it that we do not leave the connection locked
+ if (cp.isActive()) {
+ cp.cancelCopy();
+ }
+ }
+ }
+
+ /**
+ * Use COPY FROM STDIN for very fast copying from a ByteStreamWriter into a database table.
+ *
+ * @param sql COPY FROM STDIN statement
+ * @param from the source of bytes, e.g. a ByteBufferByteStreamWriter
+ * @return number of rows updated for server 8.2 or newer; -1 for older
+ * @throws SQLException on database usage issues
+ * @throws IOException upon input stream or database connection failure
+ */
+ public long copyIn(String sql, ByteStreamWriter from)
+ throws SQLException, IOException {
+ CopyIn cp = copyIn(sql);
+ try {
+ cp.writeToCopy(from);
+ return cp.endCopy();
+ } finally { // see to it that we do not leave the connection locked
+ if (cp.isActive()) {
+ cp.cancelCopy();
+ }
+ }
}
- }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/copy/CopyOperation.java b/pgjdbc/src/main/java/org/postgresql/copy/CopyOperation.java
index 239c629..a7c485b 100644
--- a/pgjdbc/src/main/java/org/postgresql/copy/CopyOperation.java
+++ b/pgjdbc/src/main/java/org/postgresql/copy/CopyOperation.java
@@ -13,39 +13,39 @@ import java.sql.SQLException;
*/
public interface CopyOperation {
- /**
- * @return number of fields in each row for this operation
- */
- int getFieldCount();
+ /**
+ * @return number of fields in each row for this operation
+ */
+ int getFieldCount();
- /**
- * @return overall format of each row: 0 = textual, 1 = binary
- */
- int getFormat();
+ /**
+ * @return overall format of each row: 0 = textual, 1 = binary
+ */
+ int getFormat();
- /**
- * @param field number of field (0..fieldCount()-1)
- * @return format of requested field: 0 = textual, 1 = binary
- */
- int getFieldFormat(int field);
+ /**
+ * @param field number of field (0..fieldCount()-1)
+ * @return format of requested field: 0 = textual, 1 = binary
+ */
+ int getFieldFormat(int field);
- /**
- * @return is connection reserved for this Copy operation?
- */
- boolean isActive();
+ /**
+ * @return is connection reserved for this Copy operation?
+ */
+ boolean isActive();
- /**
- * Cancels this copy operation, discarding any exchanged data.
- *
- * @throws SQLException if cancelling fails
- */
- void cancelCopy() throws SQLException;
+ /**
+ * Cancels this copy operation, discarding any exchanged data.
+ *
+ * @throws SQLException if cancelling fails
+ */
+ void cancelCopy() throws SQLException;
- /**
- * After successful end of copy, returns the number of database records handled in that operation.
- * Only implemented in PostgreSQL server version 8.2 and up. Otherwise, returns -1.
- *
- * @return number of handled rows or -1
- */
- long getHandledRowCount();
+ /**
+ * After successful end of copy, returns the number of database records handled in that operation.
+ * Only implemented in PostgreSQL server version 8.2 and up. Otherwise, returns -1.
+ *
+ * @return number of handled rows or -1
+ */
+ long getHandledRowCount();
}
diff --git a/pgjdbc/src/main/java/org/postgresql/copy/CopyOut.java b/pgjdbc/src/main/java/org/postgresql/copy/CopyOut.java
index e7918e1..73e09c7 100644
--- a/pgjdbc/src/main/java/org/postgresql/copy/CopyOut.java
+++ b/pgjdbc/src/main/java/org/postgresql/copy/CopyOut.java
@@ -8,22 +8,22 @@ package org.postgresql.copy;
import java.sql.SQLException;
public interface CopyOut extends CopyOperation {
- /**
- * Blocks wait for a row of data to be received from server on an active copy operation.
- *
- * @return byte array received from server, null if server complete copy operation
- * @throws SQLException if something goes wrong for example socket timeout
- */
- byte [] readFromCopy() throws SQLException;
+ /**
+ * Blocks waiting for a row of data to be received from the server on an active copy operation.
+ *
+ * @return byte array received from the server, or null if the server completed the copy operation
+ * @throws SQLException if something goes wrong for example socket timeout
+ */
+ byte[] readFromCopy() throws SQLException;
- /**
- * Wait for a row of data to be received from server on an active copy operation.
- *
- * @param block {@code true} if need wait data from server otherwise {@code false} and will read
- * pending message from server
- * @return byte array received from server, if pending message from server absent and use no
- * blocking mode return null
- * @throws SQLException if something goes wrong for example socket timeout
- */
- byte [] readFromCopy(boolean block) throws SQLException;
+ /**
+ * Wait for a row of data to be received from server on an active copy operation.
+ *
+ * @param block {@code true} to block until data is available from the server; {@code false} to
+ * only read an already-pending message from the server
+ * @return byte array received from the server, or null in non-blocking mode when no message
+ * from the server is pending
+ * @throws SQLException if something goes wrong for example socket timeout
+ */
+ byte[] readFromCopy(boolean block) throws SQLException;
}
diff --git a/pgjdbc/src/main/java/org/postgresql/copy/PGCopyInputStream.java b/pgjdbc/src/main/java/org/postgresql/copy/PGCopyInputStream.java
index aefd13a..eea37a9 100644
--- a/pgjdbc/src/main/java/org/postgresql/copy/PGCopyInputStream.java
+++ b/pgjdbc/src/main/java/org/postgresql/copy/PGCopyInputStream.java
@@ -5,174 +5,173 @@
package org.postgresql.copy;
+import java.io.IOException;
+import java.io.InputStream;
+import java.sql.SQLException;
+import java.util.Arrays;
import org.postgresql.PGConnection;
import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
-import java.io.IOException;
-import java.io.InputStream;
-import java.sql.SQLException;
-import java.util.Arrays;
-
/**
* InputStream for reading from a PostgreSQL COPY TO STDOUT operation.
*/
public class PGCopyInputStream extends InputStream implements CopyOut {
- private CopyOut op;
- private byte [] buf;
- private int at;
- private int len;
+ private CopyOut op;
+ private byte[] buf;
+ private int at;
+ private int len;
- /**
- * Uses given connection for specified COPY TO STDOUT operation.
- *
- * @param connection database connection to use for copying (protocol version 3 required)
- * @param sql COPY TO STDOUT statement
- * @throws SQLException if initializing the operation fails
- */
- public PGCopyInputStream(PGConnection connection, String sql) throws SQLException {
- this(connection.getCopyAPI().copyOut(sql));
- }
-
- /**
- * Use given CopyOut operation for reading.
- *
- * @param op COPY TO STDOUT operation
- */
- public PGCopyInputStream(CopyOut op) {
- this.op = op;
- }
-
- private CopyOut getOp() {
- return op;
- }
-
- private byte [] fillBuffer() throws IOException {
- if (at >= len) {
- try {
- buf = getOp().readFromCopy();
- } catch (SQLException sqle) {
- throw new IOException(GT.tr("Copying from database failed: {0}", sqle.getMessage()), sqle);
- }
- if (buf == null) {
- at = -1;
- } else {
- at = 0;
- len = buf.length;
- }
+ /**
+ * Uses given connection for specified COPY TO STDOUT operation.
+ *
+ * @param connection database connection to use for copying (protocol version 3 required)
+ * @param sql COPY TO STDOUT statement
+ * @throws SQLException if initializing the operation fails
+ */
+ public PGCopyInputStream(PGConnection connection, String sql) throws SQLException {
+ this(connection.getCopyAPI().copyOut(sql));
}
- return buf;
- }
- private void checkClosed() throws IOException {
- if (op == null) {
- throw new IOException(GT.tr("This copy stream is closed."));
+ /**
+ * Use given CopyOut operation for reading.
+ *
+ * @param op COPY TO STDOUT operation
+ */
+ public PGCopyInputStream(CopyOut op) {
+ this.op = op;
}
- }
- @Override
- public int available() throws IOException {
- checkClosed();
- return buf != null ? len - at : 0;
- }
-
- @Override
- public int read() throws IOException {
- checkClosed();
- byte[] buf = fillBuffer();
- return buf != null ? (buf[at++] & 0xFF) : -1;
- }
-
- @Override
- public int read(byte[] buf) throws IOException {
- return read(buf, 0, buf.length);
- }
-
- @Override
- public int read(byte[] buf, int off, int siz) throws IOException {
- checkClosed();
- int got = 0;
- byte[] data = fillBuffer();
- for (; got < siz && data != null; data = fillBuffer()) {
- int length = Math.min(siz - got, len - at);
- System.arraycopy(data, at, buf, off + got, length);
- at += length;
- got += length;
+ private CopyOut getOp() {
+ return op;
}
- return got == 0 && data == null ? -1 : got;
- }
- @Override
- public byte [] readFromCopy() throws SQLException {
- byte[] result = null;
- try {
- byte[] buf = fillBuffer();
- if (buf != null) {
- if (at > 0 || len < buf.length) {
- result = Arrays.copyOfRange(buf, at, len);
- } else {
- result = buf;
+ private byte[] fillBuffer() throws IOException {
+ if (at >= len) {
+ try {
+ buf = getOp().readFromCopy();
+ } catch (SQLException sqle) {
+ throw new IOException(GT.tr("Copying from database failed: {0}", sqle.getMessage()), sqle);
+ }
+ if (buf == null) {
+ at = -1;
+ } else {
+ at = 0;
+ len = buf.length;
+ }
}
- // Mark the buffer as fully read
- at = len;
- }
- } catch (IOException ioe) {
- throw new PSQLException(GT.tr("Read from copy failed."), PSQLState.CONNECTION_FAILURE, ioe);
- }
- return result;
- }
-
- @Override
- public byte [] readFromCopy(boolean block) throws SQLException {
- return readFromCopy();
- }
-
- @Override
- public void close() throws IOException {
- // Don't complain about a double close.
- CopyOut op = this.op;
- if (op == null) {
- return;
+ return buf;
}
- if (op.isActive()) {
- try {
- op.cancelCopy();
- } catch (SQLException se) {
- throw new IOException("Failed to close copy reader.", se);
- }
+ private void checkClosed() throws IOException {
+ if (op == null) {
+ throw new IOException(GT.tr("This copy stream is closed."));
+ }
}
- this.op = null;
- }
- @Override
- public void cancelCopy() throws SQLException {
- getOp().cancelCopy();
- }
+ @Override
+ public int available() throws IOException {
+ checkClosed();
+ return buf != null ? len - at : 0;
+ }
- @Override
- public int getFormat() {
- return getOp().getFormat();
- }
+ @Override
+ public int read() throws IOException {
+ checkClosed();
+ byte[] buf = fillBuffer();
+ return buf != null ? (buf[at++] & 0xFF) : -1;
+ }
- @Override
- public int getFieldFormat(int field) {
- return getOp().getFieldFormat(field);
- }
+ @Override
+ public int read(byte[] buf) throws IOException {
+ return read(buf, 0, buf.length);
+ }
- @Override
- public int getFieldCount() {
- return getOp().getFieldCount();
- }
+ @Override
+ public int read(byte[] buf, int off, int siz) throws IOException {
+ checkClosed();
+ int got = 0;
+ byte[] data = fillBuffer();
+ for (; got < siz && data != null; data = fillBuffer()) {
+ int length = Math.min(siz - got, len - at);
+ System.arraycopy(data, at, buf, off + got, length);
+ at += length;
+ got += length;
+ }
+ return got == 0 && data == null ? -1 : got;
+ }
- @Override
- public boolean isActive() {
- return op != null && op.isActive();
- }
+ @Override
+ public byte[] readFromCopy() throws SQLException {
+ byte[] result = null;
+ try {
+ byte[] buf = fillBuffer();
+ if (buf != null) {
+ if (at > 0 || len < buf.length) {
+ result = Arrays.copyOfRange(buf, at, len);
+ } else {
+ result = buf;
+ }
+ // Mark the buffer as fully read
+ at = len;
+ }
+ } catch (IOException ioe) {
+ throw new PSQLException(GT.tr("Read from copy failed."), PSQLState.CONNECTION_FAILURE, ioe);
+ }
+ return result;
+ }
- @Override
- public long getHandledRowCount() {
- return getOp().getHandledRowCount();
- }
+ @Override
+ public byte[] readFromCopy(boolean block) throws SQLException {
+ return readFromCopy();
+ }
+
+ @Override
+ public void close() throws IOException {
+ // Don't complain about a double close.
+ CopyOut op = this.op;
+ if (op == null) {
+ return;
+ }
+
+ if (op.isActive()) {
+ try {
+ op.cancelCopy();
+ } catch (SQLException se) {
+ throw new IOException("Failed to close copy reader.", se);
+ }
+ }
+ this.op = null;
+ }
+
+ @Override
+ public void cancelCopy() throws SQLException {
+ getOp().cancelCopy();
+ }
+
+ @Override
+ public int getFormat() {
+ return getOp().getFormat();
+ }
+
+ @Override
+ public int getFieldFormat(int field) {
+ return getOp().getFieldFormat(field);
+ }
+
+ @Override
+ public int getFieldCount() {
+ return getOp().getFieldCount();
+ }
+
+ @Override
+ public boolean isActive() {
+ return op != null && op.isActive();
+ }
+
+ @Override
+ public long getHandledRowCount() {
+ return getOp().getHandledRowCount();
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/copy/PGCopyOutputStream.java b/pgjdbc/src/main/java/org/postgresql/copy/PGCopyOutputStream.java
index 322a5a9..c05f6b9 100644
--- a/pgjdbc/src/main/java/org/postgresql/copy/PGCopyOutputStream.java
+++ b/pgjdbc/src/main/java/org/postgresql/copy/PGCopyOutputStream.java
@@ -5,199 +5,198 @@
package org.postgresql.copy;
-import org.postgresql.PGConnection;
-import org.postgresql.util.ByteStreamWriter;
-import org.postgresql.util.GT;
-
import java.io.IOException;
import java.io.OutputStream;
import java.sql.SQLException;
+import org.postgresql.PGConnection;
+import org.postgresql.util.ByteStreamWriter;
+import org.postgresql.util.GT;
/**
* OutputStream for buffered input into a PostgreSQL COPY FROM STDIN operation.
*/
public class PGCopyOutputStream extends OutputStream implements CopyIn {
- private CopyIn op;
- private final byte[] copyBuffer;
- private final byte[] singleByteBuffer = new byte[1];
- private int at;
+ private final byte[] copyBuffer;
+ private final byte[] singleByteBuffer = new byte[1];
+ private CopyIn op;
+ private int at;
- /**
- * Uses given connection for specified COPY FROM STDIN operation.
- *
- * @param connection database connection to use for copying (protocol version 3 required)
- * @param sql COPY FROM STDIN statement
- * @throws SQLException if initializing the operation fails
- */
- public PGCopyOutputStream(PGConnection connection, String sql) throws SQLException {
- this(connection, sql, CopyManager.DEFAULT_BUFFER_SIZE);
- }
-
- /**
- * Uses given connection for specified COPY FROM STDIN operation.
- *
- * @param connection database connection to use for copying (protocol version 3 required)
- * @param sql COPY FROM STDIN statement
- * @param bufferSize try to send this many bytes at a time
- * @throws SQLException if initializing the operation fails
- */
- public PGCopyOutputStream(PGConnection connection, String sql, int bufferSize)
- throws SQLException {
- this(connection.getCopyAPI().copyIn(sql), bufferSize);
- }
-
- /**
- * Use given CopyIn operation for writing.
- *
- * @param op COPY FROM STDIN operation
- */
- public PGCopyOutputStream(CopyIn op) {
- this(op, CopyManager.DEFAULT_BUFFER_SIZE);
- }
-
- /**
- * Use given CopyIn operation for writing.
- *
- * @param op COPY FROM STDIN operation
- * @param bufferSize try to send this many bytes at a time
- */
- public PGCopyOutputStream(CopyIn op, int bufferSize) {
- this.op = op;
- copyBuffer = new byte[bufferSize];
- }
-
- private CopyIn getOp() {
- return op;
- }
-
- @Override
- public void write(int b) throws IOException {
- checkClosed();
- if (b < 0 || b > 255) {
- throw new IOException(GT.tr("Cannot write to copy a byte of value {0}", b));
- }
- singleByteBuffer[0] = (byte) b;
- write(singleByteBuffer, 0, 1);
- }
-
- @Override
- public void write(byte[] buf) throws IOException {
- write(buf, 0, buf.length);
- }
-
- @Override
- public void write(byte[] buf, int off, int siz) throws IOException {
- checkClosed();
- try {
- writeToCopy(buf, off, siz);
- } catch (SQLException se) {
- throw new IOException("Write to copy failed.", se);
- }
- }
-
- private void checkClosed() throws IOException {
- if (op == null) {
- throw new IOException(GT.tr("This copy stream is closed."));
- }
- }
-
- @Override
- public void close() throws IOException {
- // Don't complain about a double close.
- CopyIn op = this.op;
- if (op == null) {
- return;
+ /**
+ * Uses given connection for specified COPY FROM STDIN operation.
+ *
+ * @param connection database connection to use for copying (protocol version 3 required)
+ * @param sql COPY FROM STDIN statement
+ * @throws SQLException if initializing the operation fails
+ */
+ public PGCopyOutputStream(PGConnection connection, String sql) throws SQLException {
+ this(connection, sql, CopyManager.DEFAULT_BUFFER_SIZE);
}
- if (op.isActive()) {
- try {
- endCopy();
- } catch (SQLException se) {
- throw new IOException("Ending write to copy failed.", se);
- }
+ /**
+ * Uses given connection for specified COPY FROM STDIN operation.
+ *
+ * @param connection database connection to use for copying (protocol version 3 required)
+ * @param sql COPY FROM STDIN statement
+ * @param bufferSize try to send this many bytes at a time
+ * @throws SQLException if initializing the operation fails
+ */
+ public PGCopyOutputStream(PGConnection connection, String sql, int bufferSize)
+ throws SQLException {
+ this(connection.getCopyAPI().copyIn(sql), bufferSize);
}
- this.op = null;
- }
- @Override
- public void flush() throws IOException {
- checkClosed();
- try {
- getOp().writeToCopy(copyBuffer, 0, at);
- at = 0;
- getOp().flushCopy();
- } catch (SQLException e) {
- throw new IOException("Unable to flush stream", e);
+ /**
+ * Use given CopyIn operation for writing.
+ *
+ * @param op COPY FROM STDIN operation
+ */
+ public PGCopyOutputStream(CopyIn op) {
+ this(op, CopyManager.DEFAULT_BUFFER_SIZE);
}
- }
- @Override
- public void writeToCopy(byte[] buf, int off, int siz) throws SQLException {
- if (at > 0
- && siz > copyBuffer.length - at) { // would not fit into rest of our buf, so flush buf
- getOp().writeToCopy(copyBuffer, 0, at);
- at = 0;
+ /**
+ * Use given CopyIn operation for writing.
+ *
+ * @param op COPY FROM STDIN operation
+ * @param bufferSize try to send this many bytes at a time
+ */
+ public PGCopyOutputStream(CopyIn op, int bufferSize) {
+ this.op = op;
+ copyBuffer = new byte[bufferSize];
}
- if (siz > copyBuffer.length) { // would still not fit into buf, so just pass it through
- getOp().writeToCopy(buf, off, siz);
- } else { // fits into our buf, so save it there
- System.arraycopy(buf, off, copyBuffer, at, siz);
- at += siz;
+
+ private CopyIn getOp() {
+ return op;
}
- }
- @Override
- public void writeToCopy(ByteStreamWriter from) throws SQLException {
- if (at > 0) {
- // flush existing buffer so order is preserved
- getOp().writeToCopy(copyBuffer, 0, at);
- at = 0;
+ @Override
+ public void write(int b) throws IOException {
+ checkClosed();
+ if (b < 0 || b > 255) {
+ throw new IOException(GT.tr("Cannot write to copy a byte of value {0}", b));
+ }
+ singleByteBuffer[0] = (byte) b;
+ write(singleByteBuffer, 0, 1);
}
- getOp().writeToCopy(from);
- }
- @Override
- public int getFormat() {
- return getOp().getFormat();
- }
-
- @Override
- public int getFieldFormat(int field) {
- return getOp().getFieldFormat(field);
- }
-
- @Override
- public void cancelCopy() throws SQLException {
- getOp().cancelCopy();
- }
-
- @Override
- public int getFieldCount() {
- return getOp().getFieldCount();
- }
-
- @Override
- public boolean isActive() {
- return op != null && getOp().isActive();
- }
-
- @Override
- public void flushCopy() throws SQLException {
- getOp().flushCopy();
- }
-
- @Override
- public long endCopy() throws SQLException {
- if (at > 0) {
- getOp().writeToCopy(copyBuffer, 0, at);
+ @Override
+ public void write(byte[] buf) throws IOException {
+ write(buf, 0, buf.length);
}
- getOp().endCopy();
- return getHandledRowCount();
- }
- @Override
- public long getHandledRowCount() {
- return getOp().getHandledRowCount();
- }
+ @Override
+ public void write(byte[] buf, int off, int siz) throws IOException {
+ checkClosed();
+ try {
+ writeToCopy(buf, off, siz);
+ } catch (SQLException se) {
+ throw new IOException("Write to copy failed.", se);
+ }
+ }
+
+ private void checkClosed() throws IOException {
+ if (op == null) {
+ throw new IOException(GT.tr("This copy stream is closed."));
+ }
+ }
+
+ @Override
+ public void close() throws IOException {
+ // Don't complain about a double close.
+ CopyIn op = this.op;
+ if (op == null) {
+ return;
+ }
+
+ if (op.isActive()) {
+ try {
+ endCopy();
+ } catch (SQLException se) {
+ throw new IOException("Ending write to copy failed.", se);
+ }
+ }
+ this.op = null;
+ }
+
+ @Override
+ public void flush() throws IOException {
+ checkClosed();
+ try {
+ getOp().writeToCopy(copyBuffer, 0, at);
+ at = 0;
+ getOp().flushCopy();
+ } catch (SQLException e) {
+ throw new IOException("Unable to flush stream", e);
+ }
+ }
+
+ @Override
+ public void writeToCopy(byte[] buf, int off, int siz) throws SQLException {
+ if (at > 0
+ && siz > copyBuffer.length - at) { // would not fit into rest of our buf, so flush buf
+ getOp().writeToCopy(copyBuffer, 0, at);
+ at = 0;
+ }
+ if (siz > copyBuffer.length) { // would still not fit into buf, so just pass it through
+ getOp().writeToCopy(buf, off, siz);
+ } else { // fits into our buf, so save it there
+ System.arraycopy(buf, off, copyBuffer, at, siz);
+ at += siz;
+ }
+ }
+
+ @Override
+ public void writeToCopy(ByteStreamWriter from) throws SQLException {
+ if (at > 0) {
+ // flush existing buffer so order is preserved
+ getOp().writeToCopy(copyBuffer, 0, at);
+ at = 0;
+ }
+ getOp().writeToCopy(from);
+ }
+
+ @Override
+ public int getFormat() {
+ return getOp().getFormat();
+ }
+
+ @Override
+ public int getFieldFormat(int field) {
+ return getOp().getFieldFormat(field);
+ }
+
+ @Override
+ public void cancelCopy() throws SQLException {
+ getOp().cancelCopy();
+ }
+
+ @Override
+ public int getFieldCount() {
+ return getOp().getFieldCount();
+ }
+
+ @Override
+ public boolean isActive() {
+ return op != null && getOp().isActive();
+ }
+
+ @Override
+ public void flushCopy() throws SQLException {
+ getOp().flushCopy();
+ }
+
+ @Override
+ public long endCopy() throws SQLException {
+ if (at > 0) {
+ getOp().writeToCopy(copyBuffer, 0, at);
+ }
+ getOp().endCopy();
+ return getHandledRowCount();
+ }
+
+ @Override
+ public long getHandledRowCount() {
+ return getOp().getHandledRowCount();
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/AsciiStringInterner.java b/pgjdbc/src/main/java/org/postgresql/core/AsciiStringInterner.java
index 3aed133..3ee5531 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/AsciiStringInterner.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/AsciiStringInterner.java
@@ -29,326 +29,330 @@ import java.util.concurrent.ConcurrentMap;
*
* @author Brett Okken
*/
-final class AsciiStringInterner {
+public final class AsciiStringInterner {
- private abstract static class BaseKey {
- private final int hash;
+ /**
+ * Contains the canonicalized values, keyed by the ascii {@code byte[]}.
+ */
+ final ConcurrentMap<BaseKey, SoftReference<String>> cache = new ConcurrentHashMap<>(128);
+ /**
+ * Used for {@link Reference} as values in {@code cache}.
+ */
+ final ReferenceQueue<String> refQueue = new ReferenceQueue<>();
- BaseKey(int hash) {
- this.hash = hash;
+ public AsciiStringInterner() {
}
- @Override
- public final int hashCode() {
- return hash;
+ /**
+ * Generates a hash value for the relevant entries in bytes as long as all values are ascii ({@code >= 0}).
+ *
+ * @return hash code for relevant bytes, or {@code 0} if non-ascii bytes present.
+ */
+ private static int hashKey(byte[] bytes, int offset, int length) {
+ int result = 1;
+ for (int i = offset, j = offset + length; i < j; i++) {
+ final byte b = bytes[i];
+ // bytes are signed values. all ascii values are positive
+ if (b < 0) {
+ return 0;
+ }
+ result = 31 * result + b;
+ }
+ return result;
}
- @Override
- public final boolean equals(Object obj) {
- if (obj == this) {
+ /**
+ * Performs equality check between a and b (with corresponding offset/length values).
+ *
+ * The {@code static boolean equals(byte[], int, int, byte[], int, int)} method in {@link java.util.Arrays}
+ * is optimized for longer {@code byte[]} instances than is expected to be seen here.
+ *
+ */
+ static boolean arrayEquals(byte[] a, int aOffset, int aLength, byte[] b, int bOffset, int bLength) {
+ if (aLength != bLength) {
+ return false;
+ }
+ //TODO: in jdk9, could use VarHandle to read 4 bytes at a time as an int for comparison
+ // or 8 bytes as a long - though we likely expect short values here
+ for (int i = 0; i < aLength; i++) {
+ if (a[aOffset + i] != b[bOffset + i]) {
+ return false;
+ }
+ }
return true;
- }
- if (!(obj instanceof BaseKey)) {
- return false;
- }
- final BaseKey other = (BaseKey) obj;
- return equalsBytes(other);
}
- abstract boolean equalsBytes(BaseKey other);
-
- abstract boolean equals(byte[] other, int offset, int length);
-
- abstract void appendString(StringBuilder sb);
- }
-
- /**
- * Only used for lookups, never to actually store entries.
- */
- private static class TempKey extends BaseKey {
- final byte[] bytes;
- final int offset;
- final int length;
-
- TempKey(int hash, byte[] bytes, int offset, int length) {
- super(hash);
- this.bytes = bytes;
- this.offset = offset;
- this.length = length;
+ /**
+ * Preemptively populates a value into the cache. This is intended to be used with {@code String} constants
+ * which are frequently used. While this can work with other {@code String} values, if val is ever
+ * garbage collected, it will not be actively removed from this instance.
+ *
+ * @param val The value to intern. Must not be {@code null}.
+ * @return Indication if val is an ascii String and placed into cache.
+ */
+ public boolean putString(String val) {
+ //ask for utf-8 so that we can detect if any of the characters are not ascii
+ final byte[] copy = val.getBytes(StandardCharsets.UTF_8);
+ final int hash = hashKey(copy, 0, copy.length);
+ if (hash == 0) {
+ return false;
+ }
+ final Key key = new Key(copy, hash);
+ //we are assuming this is a java interned string, so this is unlikely to ever be
+ //reclaimed. so there is no value in using the custom StringReference or hand off to
+ //the refQueue.
+ //on the outside chance it actually does get reclaimed, it will just hang around as an
+ //empty reference in the map unless/until attempted to be retrieved
+ cache.put(key, new SoftReference<String>(val));
+ return true;
}
- @Override
- boolean equalsBytes(BaseKey other) {
- return other.equals(bytes, offset, length);
+ /**
+ * Produces a {@link String} instance for the given bytes. If all are valid ascii (i.e. {@code >= 0})
+ * either an existing value will be returned, or the newly created {@code String} will be stored before being
+ * returned.
+ *
+ *
+ * If non-ascii bytes are discovered, the encoding will be used to
+ * {@link Encoding#decode(byte[], int, int) decode} and that value will be returned (but not stored).
+ *
+ *
+ * @param bytes The bytes of the String. Must not be {@code null}.
+ * @param offset Offset into bytes to start.
+ * @param length The number of bytes in bytes which are relevant.
+ * @param encoding To use if non-ascii bytes seen.
+ * @return Decoded {@code String} from bytes.
+ * @throws IOException If error decoding from Encoding.
+ */
+ public String getString(byte[] bytes, int offset, int length, Encoding encoding) throws IOException {
+ if (length == 0) {
+ return "";
+ }
+
+ final int hash = hashKey(bytes, offset, length);
+ // 0 indicates the presence of a non-ascii character - defer to encoding to create the string
+ if (hash == 0) {
+ return encoding.decode(bytes, offset, length);
+ }
+ cleanQueue();
+ // create a TempKey with the byte[] given
+ final TempKey tempKey = new TempKey(hash, bytes, offset, length);
+ SoftReference<String> ref = cache.get(tempKey);
+ if (ref != null) {
+ final String val = ref.get();
+ if (val != null) {
+ return val;
+ }
+ }
+ // in order to insert we need to create a "real" key with copy of bytes that will not be changed
+ final byte[] copy = Arrays.copyOfRange(bytes, offset, offset + length);
+ final Key key = new Key(copy, hash);
+ final String value = new String(copy, StandardCharsets.US_ASCII);
+
+ // handle case where a concurrent thread has populated the map or existing value has cleared reference
+ ref = cache.compute(key, (k, v) -> {
+ if (v == null) {
+ return new StringReference(key, value);
+ }
+ final String val = v.get();
+ return val != null ? v : new StringReference(key, value);
+ });
+
+ return ref.get();
}
- @Override
- public boolean equals(byte[] other, int offset, int length) {
- return arrayEquals(this.bytes, this.offset, this.length, other, offset, length);
+ /**
+ * Produces a {@link String} instance for the given bytes.
+ *
+ *
+ * If all are valid ascii (i.e. {@code >= 0}) and a corresponding {@code String} value exists, it
+ * will be returned. If no value exists, a {@code String} will be created, but not stored.
+ *
+ *
+ *
+ * If non-ascii bytes are discovered, the encoding will be used to
+ * {@link Encoding#decode(byte[], int, int) decode} and that value will be returned (but not stored).
+ *
+ *
+ * @param bytes The bytes of the String. Must not be {@code null}.
+ * @param offset Offset into bytes to start.
+ * @param length The number of bytes in bytes which are relevant.
+ * @param encoding To use if non-ascii bytes seen.
+ * @return Decoded {@code String} from bytes.
+ * @throws IOException If error decoding from Encoding.
+ */
+ public String getStringIfPresent(byte[] bytes, int offset, int length, Encoding encoding) throws IOException {
+ if (length == 0) {
+ return "";
+ }
+
+ final int hash = hashKey(bytes, offset, length);
+ // 0 indicates the presence of a non-ascii character - defer to encoding to create the string
+ if (hash == 0) {
+ return encoding.decode(bytes, offset, length);
+ }
+ cleanQueue();
+ // create a TempKey with the byte[] given
+ final TempKey tempKey = new TempKey(hash, bytes, offset, length);
+ SoftReference<String> ref = cache.get(tempKey);
+ if (ref != null) {
+ final String val = ref.get();
+ if (val != null) {
+ return val;
+ }
+ }
+
+ return new String(bytes, offset, length, StandardCharsets.US_ASCII);
}
- @Override
- void appendString(StringBuilder sb) {
- for (int i = offset, j = offset + length; i < j; i++) {
- sb.append((char) bytes[i]);
- }
- }
- }
-
- /**
- * Instance used for inserting values into the cache. The {@code byte[]} must be a copy
- * that will never be mutated.
- */
- private static final class Key extends BaseKey {
- final byte[] key;
-
- Key(byte[] key, int hash) {
- super(hash);
- this.key = key;
+ /**
+ * Process any entries in {@link #refQueue} to purge from the {@link #cache}.
+ *
+ * @see StringReference#dispose()
+ */
+ private void cleanQueue() {
+ Reference<? extends String> ref;
+ while ((ref = refQueue.poll()) != null) {
+ ((StringReference) ref).dispose();
+ }
}
/**
* {@inheritDoc}
*/
@Override
- boolean equalsBytes(BaseKey other) {
- return other.equals(key, 0, key.length);
+ public String toString() {
+ final StringBuilder sb = new StringBuilder(32 + (8 * cache.size()));
+ sb.append("AsciiStringInterner [");
+ cache.forEach((k, v) -> {
+ sb.append('\'');
+ k.appendString(sb);
+ sb.append("', ");
+ });
+ //replace trailing ', ' with ']';
+ final int length = sb.length();
+ if (length > 21) {
+ sb.setLength(sb.length() - 2);
+ }
+ sb.append(']');
+ return sb.toString();
}
- @Override
- public boolean equals(byte[] other, int offset, int length) {
- return arrayEquals(this.key, 0, this.key.length, other, offset, length);
+ private abstract static class BaseKey {
+ private final int hash;
+
+ BaseKey(int hash) {
+ this.hash = hash;
+ }
+
+ @Override
+ public final int hashCode() {
+ return hash;
+ }
+
+ @Override
+ public final boolean equals(Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof BaseKey)) {
+ return false;
+ }
+ final BaseKey other = (BaseKey) obj;
+ return equalsBytes(other);
+ }
+
+ abstract boolean equalsBytes(BaseKey other);
+
+ abstract boolean equals(byte[] other, int offset, int length);
+
+ abstract void appendString(StringBuilder sb);
}
/**
- * {@inheritDoc}
+ * Only used for lookups, never to actually store entries.
*/
- @Override
- void appendString(StringBuilder sb) {
- for (int i = 0; i < key.length; i++) {
- sb.append((char) key[i]);
- }
- }
- }
+ private static class TempKey extends BaseKey {
+ final byte[] bytes;
+ final int offset;
+ final int length;
- /**
- * Custom {@link SoftReference} implementation which maintains a reference to the key in the cache,
- * which allows aggressive cleaning when garbage collector collects the {@code String} instance.
- */
- private final class StringReference extends SoftReference {
+ TempKey(int hash, byte[] bytes, int offset, int length) {
+ super(hash);
+ this.bytes = bytes;
+ this.offset = offset;
+ this.length = length;
+ }
- private final BaseKey key;
+ @Override
+ boolean equalsBytes(BaseKey other) {
+ return other.equals(bytes, offset, length);
+ }
- StringReference(BaseKey key, String referent) {
- super(referent, refQueue);
- this.key = key;
+ @Override
+ public boolean equals(byte[] other, int offset, int length) {
+ return arrayEquals(this.bytes, this.offset, this.length, other, offset, length);
+ }
+
+ @Override
+ void appendString(StringBuilder sb) {
+ for (int i = offset, j = offset + length; i < j; i++) {
+ sb.append((char) bytes[i]);
+ }
+ }
}
- void dispose() {
- cache.remove(key, this);
- }
- }
+ /**
+ * Instance used for inserting values into the cache. The {@code byte[]} must be a copy
+ * that will never be mutated.
+ */
+ private static final class Key extends BaseKey {
+ final byte[] key;
- /**
- * Contains the canonicalized values, keyed by the ascii {@code byte[]}.
- */
- final ConcurrentMap> cache = new ConcurrentHashMap<>(128);
+ Key(byte[] key, int hash) {
+ super(hash);
+ this.key = key;
+ }
- /**
- * Used for {@link Reference} as values in {@code cache}.
- */
- final ReferenceQueue refQueue = new ReferenceQueue<>();
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ boolean equalsBytes(BaseKey other) {
+ return other.equals(key, 0, key.length);
+ }
- /**
- * Preemptively populates a value into the cache. This is intended to be used with {@code String} constants
- * which are frequently used. While this can work with other {@code String} values, if val is ever
- * garbage collected, it will not be actively removed from this instance.
- *
- * @param val The value to intern. Must not be {@code null}.
- * @return Indication if val is an ascii String and placed into cache.
- */
- public boolean putString(String val) {
- //ask for utf-8 so that we can detect if any of the characters are not ascii
- final byte[] copy = val.getBytes(StandardCharsets.UTF_8);
- final int hash = hashKey(copy, 0, copy.length);
- if (hash == 0) {
- return false;
- }
- final Key key = new Key(copy, hash);
- //we are assuming this is a java interned string from , so this is unlikely to ever be
- //reclaimed. so there is no value in using the custom StringReference or hand off to
- //the refQueue.
- //on the outside chance it actually does get reclaimed, it will just hang around as an
- //empty reference in the map unless/until attempted to be retrieved
- cache.put(key, new SoftReference(val));
- return true;
- }
+ @Override
+ public boolean equals(byte[] other, int offset, int length) {
+ return arrayEquals(this.key, 0, this.key.length, other, offset, length);
+ }
- /**
- * Produces a {@link String} instance for the given bytes. If all are valid ascii (i.e. {@code >= 0})
- * either an existing value will be returned, or the newly created {@code String} will be stored before being
- * returned.
- *
- *
- * If non-ascii bytes are discovered, the encoding will be used to
- * {@link Encoding#decode(byte[], int, int) decode} and that value will be returned (but not stored).
- *
- *
- * @param bytes The bytes of the String. Must not be {@code null}.
- * @param offset Offset into bytes to start.
- * @param length The number of bytes in bytes which are relevant.
- * @param encoding To use if non-ascii bytes seen.
- * @return Decoded {@code String} from bytes.
- * @throws IOException If error decoding from Encoding.
- */
- public String getString(byte[] bytes, int offset, int length, Encoding encoding) throws IOException {
- if (length == 0) {
- return "";
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ void appendString(StringBuilder sb) {
+ for (int i = 0; i < key.length; i++) {
+ sb.append((char) key[i]);
+ }
+ }
}
- final int hash = hashKey(bytes, offset, length);
- // 0 indicates the presence of a non-ascii character - defer to encoding to create the string
- if (hash == 0) {
- return encoding.decode(bytes, offset, length);
- }
- cleanQueue();
- // create a TempKey with the byte[] given
- final TempKey tempKey = new TempKey(hash, bytes, offset, length);
- SoftReference ref = cache.get(tempKey);
- if (ref != null) {
- final String val = ref.get();
- if (val != null) {
- return val;
- }
- }
- // in order to insert we need to create a "real" key with copy of bytes that will not be changed
- final byte[] copy = Arrays.copyOfRange(bytes, offset, offset + length);
- final Key key = new Key(copy, hash);
- final String value = new String(copy, StandardCharsets.US_ASCII);
+ /**
+ * Custom {@link SoftReference} implementation which maintains a reference to the key in the cache,
+ * which allows aggressive cleaning when garbage collector collects the {@code String} instance.
+ */
+ private final class StringReference extends SoftReference<String> {
- // handle case where a concurrent thread has populated the map or existing value has cleared reference
- ref = cache.compute(key, (k, v) -> {
- if (v == null) {
- return new StringReference(key, value);
- }
- final String val = v.get();
- return val != null ? v : new StringReference(key, value);
- });
+ private final BaseKey key;
- return ref.get();
- }
+ StringReference(BaseKey key, String referent) {
+ super(referent, refQueue);
+ this.key = key;
+ }
- /**
- * Produces a {@link String} instance for the given bytes.
- *
- *
- * If all are valid ascii (i.e. {@code >= 0}) and a corresponding {@code String} value exists, it
- * will be returned. If no value exists, a {@code String} will be created, but not stored.
- *
- *
- *
- * If non-ascii bytes are discovered, the encoding will be used to
- * {@link Encoding#decode(byte[], int, int) decode} and that value will be returned (but not stored).
- *
- *
- * @param bytes The bytes of the String. Must not be {@code null}.
- * @param offset Offset into bytes to start.
- * @param length The number of bytes in bytes which are relevant.
- * @param encoding To use if non-ascii bytes seen.
- * @return Decoded {@code String} from bytes.
- * @throws IOException If error decoding from Encoding.
- */
- public String getStringIfPresent(byte[] bytes, int offset, int length, Encoding encoding) throws IOException {
- if (length == 0) {
- return "";
+ void dispose() {
+ cache.remove(key, this);
+ }
}
-
- final int hash = hashKey(bytes, offset, length);
- // 0 indicates the presence of a non-ascii character - defer to encoding to create the string
- if (hash == 0) {
- return encoding.decode(bytes, offset, length);
- }
- cleanQueue();
- // create a TempKey with the byte[] given
- final TempKey tempKey = new TempKey(hash, bytes, offset, length);
- SoftReference ref = cache.get(tempKey);
- if (ref != null) {
- final String val = ref.get();
- if (val != null) {
- return val;
- }
- }
-
- return new String(bytes, offset, length, StandardCharsets.US_ASCII);
- }
-
- /**
- * Process any entries in {@link #refQueue} to purge from the {@link #cache}.
- * @see StringReference#dispose()
- */
- private void cleanQueue() {
- Reference> ref;
- while ((ref = refQueue.poll()) != null) {
- ((StringReference) ref).dispose();
- }
- }
-
- /**
- * Generates a hash value for the relevant entries in bytes as long as all values are ascii ({@code >= 0}).
- * @return hash code for relevant bytes, or {@code 0} if non-ascii bytes present.
- */
- private static int hashKey(byte[] bytes, int offset, int length) {
- int result = 1;
- for (int i = offset, j = offset + length; i < j; i++) {
- final byte b = bytes[i];
- // bytes are signed values. all ascii values are positive
- if (b < 0) {
- return 0;
- }
- result = 31 * result + b;
- }
- return result;
- }
-
- /**
- * Performs equality check between a and b (with corresponding offset/length values).
- *
- * The {@code static boolean equals(byte[].class, int, int, byte[], int, int} method in {@link java.util.Arrays}
- * is optimized for longer {@code byte[]} instances than is expected to be seen here.
- *
- */
- static boolean arrayEquals(byte[] a, int aOffset, int aLength, byte[] b, int bOffset, int bLength) {
- if (aLength != bLength) {
- return false;
- }
- //TODO: in jdk9, could use VarHandle to read 4 bytes at a time as an int for comparison
- // or 8 bytes as a long - though we likely expect short values here
- for (int i = 0; i < aLength; i++) {
- if (a[aOffset + i] != b[bOffset + i]) {
- return false;
- }
- }
- return true;
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public String toString() {
- final StringBuilder sb = new StringBuilder(32 + (8 * cache.size()));
- sb.append("AsciiStringInterner [");
- cache.forEach((k, v) -> {
- sb.append('\'');
- k.appendString(sb);
- sb.append("', ");
- });
- //replace trailing ', ' with ']';
- final int length = sb.length();
- if (length > 21) {
- sb.setLength(sb.length() - 2);
- }
- sb.append(']');
- return sb.toString();
- }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/BaseConnection.java b/pgjdbc/src/main/java/org/postgresql/core/BaseConnection.java
index 35dcb79..f696efb 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/BaseConnection.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/BaseConnection.java
@@ -5,6 +5,11 @@
package org.postgresql.core;
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.TimerTask;
+import java.util.logging.Logger;
import org.postgresql.PGConnection;
import org.postgresql.PGProperty;
import org.postgresql.jdbc.FieldMetadata;
@@ -12,225 +17,220 @@ import org.postgresql.jdbc.TimestampUtils;
import org.postgresql.util.LruCache;
import org.postgresql.xml.PGXmlFactoryFactory;
-import java.sql.Connection;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.util.TimerTask;
-import java.util.logging.Logger;
-
/**
* Driver-internal connection interface. Application code should not use this interface.
*/
public interface BaseConnection extends PGConnection, Connection {
- /**
- * Cancel the current query executing on this connection.
- *
- * @throws SQLException if something goes wrong.
- */
- @Override
- void cancelQuery() throws SQLException;
+ /**
+ * Cancel the current query executing on this connection.
+ *
+ * @throws SQLException if something goes wrong.
+ */
+ @Override
+ void cancelQuery() throws SQLException;
- /**
- * Execute a SQL query that returns a single resultset. Never causes a new transaction to be
- * started regardless of the autocommit setting.
- *
- * @param s the query to execute
- * @return the (non-null) returned resultset
- * @throws SQLException if something goes wrong.
- */
- ResultSet execSQLQuery(String s) throws SQLException;
+ /**
+ * Execute a SQL query that returns a single resultset. Never causes a new transaction to be
+ * started regardless of the autocommit setting.
+ *
+ * @param s the query to execute
+ * @return the (non-null) returned resultset
+ * @throws SQLException if something goes wrong.
+ */
+ ResultSet execSQLQuery(String s) throws SQLException;
- ResultSet execSQLQuery(String s, int resultSetType, int resultSetConcurrency)
- throws SQLException;
+ ResultSet execSQLQuery(String s, int resultSetType, int resultSetConcurrency)
+ throws SQLException;
- /**
- * Execute a SQL query that does not return results. Never causes a new transaction to be started
- * regardless of the autocommit setting.
- *
- * @param s the query to execute
- * @throws SQLException if something goes wrong.
- */
- void execSQLUpdate(String s) throws SQLException;
+ /**
+ * Execute a SQL query that does not return results. Never causes a new transaction to be started
+ * regardless of the autocommit setting.
+ *
+ * @param s the query to execute
+ * @throws SQLException if something goes wrong.
+ */
+ void execSQLUpdate(String s) throws SQLException;
- /**
- * Get the QueryExecutor implementation for this connection.
- *
- * @return the (non-null) executor
- */
- QueryExecutor getQueryExecutor();
+ /**
+ * Get the QueryExecutor implementation for this connection.
+ *
+ * @return the (non-null) executor
+ */
+ QueryExecutor getQueryExecutor();
- /**
- * Internal protocol for work with physical and logical replication. Physical replication available
- * only since PostgreSQL version 9.1. Logical replication available only since PostgreSQL version 9.4.
- *
- * @return not null replication protocol
- */
- ReplicationProtocol getReplicationProtocol();
+ /**
+ * Internal protocol for work with physical and logical replication. Physical replication available
+ * only since PostgreSQL version 9.1. Logical replication available only since PostgreSQL version 9.4.
+ *
+ * @return not null replication protocol
+ */
+ ReplicationProtocol getReplicationProtocol();
- /**
- *
Construct and return an appropriate object for the given type and value. This only considers
- * the types registered via {@link org.postgresql.PGConnection#addDataType(String, Class)} and
- * {@link org.postgresql.PGConnection#addDataType(String, String)}.
- *
- *
If no class is registered as handling the given type, then a generic
- * {@link org.postgresql.util.PGobject} instance is returned.
- *
- *
value or byteValue must be non-null
- * @param type the backend typename
- * @param value the type-specific string representation of the value
- * @param byteValue the type-specific binary representation of the value
- * @return an appropriate object; never null.
- * @throws SQLException if something goes wrong
- */
- Object getObject(String type, String value, byte [] byteValue)
- throws SQLException;
+ /**
+ * <p>Construct and return an appropriate object for the given type and value. This only considers
+ * the types registered via {@link org.postgresql.PGConnection#addDataType(String, Class)} and
+ * {@link org.postgresql.PGConnection#addDataType(String, String)}.
+ *
+ * <p>If no class is registered as handling the given type, then a generic
+ * {@link org.postgresql.util.PGobject} instance is returned.
+ *
+ * <p>value or byteValue must be non-null
+ *
+ * @param type the backend typename
+ * @param value the type-specific string representation of the value
+ * @param byteValue the type-specific binary representation of the value
+ * @return an appropriate object; never null.
+ * @throws SQLException if something goes wrong
+ */
+ Object getObject(String type, String value, byte[] byteValue)
+ throws SQLException;
- Encoding getEncoding() throws SQLException;
+ Encoding getEncoding() throws SQLException;
- TypeInfo getTypeInfo();
+ TypeInfo getTypeInfo();
- /**
- *
Check if we have at least a particular server version.
- *
- *
The input version is of the form xxyyzz, matching a PostgreSQL version like xx.yy.zz. So 9.0.12
- * is 90012.
- *
- * @param ver the server version to check, of the form xxyyzz eg 90401
- * @return true if the server version is at least "ver".
- */
- boolean haveMinimumServerVersion(int ver);
+ /**
+ * <p>Check if we have at least a particular server version.
+ *
+ * <p>The input version is of the form xxyyzz, matching a PostgreSQL version like xx.yy.zz. So 9.0.12
+ * is 90012.
+ *
+ * @param ver the server version to check, of the form xxyyzz eg 90401
+ * @return true if the server version is at least "ver".
+ */
+ boolean haveMinimumServerVersion(int ver);
- /**
- *
Check if we have at least a particular server version.
- *
- *
The input version is of the form xxyyzz, matching a PostgreSQL version like xx.yy.zz. So 9.0.12
- * is 90012.
- *
- * @param ver the server version to check
- * @return true if the server version is at least "ver".
- */
- boolean haveMinimumServerVersion(Version ver);
+ /**
+ * <p>Check if we have at least a particular server version.
+ *
+ * <p>The input version is of the form xxyyzz, matching a PostgreSQL version like xx.yy.zz. So 9.0.12
+ * is 90012.
+ *
+ * @param ver the server version to check
+ * @return true if the server version is at least "ver".
+ */
+ boolean haveMinimumServerVersion(Version ver);
- /**
- * Encode a string using the database's client_encoding (usually UTF8, but can vary on older
- * server versions). This is used when constructing synthetic resultsets (for example, in metadata
- * methods).
- *
- * @param str the string to encode
- * @return an encoded representation of the string
- * @throws SQLException if something goes wrong.
- */
- byte[] encodeString(String str) throws SQLException;
+ /**
+ * Encode a string using the database's client_encoding (usually UTF8, but can vary on older
+ * server versions). This is used when constructing synthetic resultsets (for example, in metadata
+ * methods).
+ *
+ * @param str the string to encode
+ * @return an encoded representation of the string
+ * @throws SQLException if something goes wrong.
+ */
+ byte[] encodeString(String str) throws SQLException;
- /**
- * Escapes a string for use as string-literal within an SQL command. The method chooses the
- * applicable escaping rules based on the value of {@link #getStandardConformingStrings()}.
- *
- * @param str a string value
- * @return the escaped representation of the string
- * @throws SQLException if the string contains a {@code \0} character
- */
- String escapeString(String str) throws SQLException;
+ /**
+ * Escapes a string for use as string-literal within an SQL command. The method chooses the
+ * applicable escaping rules based on the value of {@link #getStandardConformingStrings()}.
+ *
+ * @param str a string value
+ * @return the escaped representation of the string
+ * @throws SQLException if the string contains a {@code \0} character
+ */
+ String escapeString(String str) throws SQLException;
- /**
- * Returns whether the server treats string-literals according to the SQL standard or if it uses
- * traditional PostgreSQL escaping rules. Versions up to 8.1 always treated backslashes as escape
- * characters in string-literals. Since 8.2, this depends on the value of the
- * {@code standard_conforming_strings} server variable.
- *
- * @return true if the server treats string literals according to the SQL standard
- * @see QueryExecutor#getStandardConformingStrings()
- */
- boolean getStandardConformingStrings();
+ /**
+ * Returns whether the server treats string-literals according to the SQL standard or if it uses
+ * traditional PostgreSQL escaping rules. Versions up to 8.1 always treated backslashes as escape
+ * characters in string-literals. Since 8.2, this depends on the value of the
+ * {@code standard_conforming_strings} server variable.
+ *
+ * @return true if the server treats string literals according to the SQL standard
+ * @see QueryExecutor#getStandardConformingStrings()
+ */
+ boolean getStandardConformingStrings();
- // Ew. Quick hack to give access to the connection-specific utils implementation.
- @Deprecated
- TimestampUtils getTimestampUtils();
+ // Ew. Quick hack to give access to the connection-specific utils implementation.
+ @Deprecated
+ TimestampUtils getTimestampUtils();
- // Get the per-connection logger.
- Logger getLogger();
+ // Get the per-connection logger.
+ Logger getLogger();
- // Get the bind-string-as-varchar config flag
- boolean getStringVarcharFlag();
+ // Get the bind-string-as-varchar config flag
+ boolean getStringVarcharFlag();
- /**
- * Get the current transaction state of this connection.
- *
- * @return current transaction state of this connection
- */
- TransactionState getTransactionState();
+ /**
+ * Get the current transaction state of this connection.
+ *
+ * @return current transaction state of this connection
+ */
+ TransactionState getTransactionState();
- /**
- * Returns true if value for the given oid should be sent using binary transfer. False if value
- * should be sent using text transfer.
- *
- * @param oid The oid to check.
- * @return True for binary transfer, false for text transfer.
- */
- boolean binaryTransferSend(int oid);
+ /**
+ * Returns true if value for the given oid should be sent using binary transfer. False if value
+ * should be sent using text transfer.
+ *
+ * @param oid The oid to check.
+ * @return True for binary transfer, false for text transfer.
+ */
+ boolean binaryTransferSend(int oid);
- /**
- * Return whether to disable column name sanitation.
- *
- * @return true column sanitizer is disabled
- */
- boolean isColumnSanitiserDisabled();
+ /**
+ * Return whether to disable column name sanitation.
+ *
+ * @return true column sanitizer is disabled
+ */
+ boolean isColumnSanitiserDisabled();
- /**
- * Schedule a TimerTask for later execution. The task will be scheduled with the shared Timer for
- * this connection.
- *
- * @param timerTask timer task to schedule
- * @param milliSeconds delay in milliseconds
- */
- void addTimerTask(TimerTask timerTask, long milliSeconds);
+ /**
+ * Schedule a TimerTask for later execution. The task will be scheduled with the shared Timer for
+ * this connection.
+ *
+ * @param timerTask timer task to schedule
+ * @param milliSeconds delay in milliseconds
+ */
+ void addTimerTask(TimerTask timerTask, long milliSeconds);
- /**
- * Invoke purge() on the underlying shared Timer so that internal resources will be released.
- */
- void purgeTimerTasks();
+ /**
+ * Invoke purge() on the underlying shared Timer so that internal resources will be released.
+ */
+ void purgeTimerTasks();
- /**
- * Return metadata cache for given connection.
- *
- * @return metadata cache
- */
- LruCache getFieldMetadataCache();
+ /**
+ * Return metadata cache for given connection.
+ *
+ * @return metadata cache
+ */
+ LruCache<FieldMetadata.Key, FieldMetadata> getFieldMetadataCache();
- CachedQuery createQuery(String sql, boolean escapeProcessing, boolean isParameterized,
- String... columnNames)
- throws SQLException;
+ CachedQuery createQuery(String sql, boolean escapeProcessing, boolean isParameterized,
+ String... columnNames)
+ throws SQLException;
- /**
- * By default, the connection resets statement cache in case deallocate all/discard all
- * message is observed.
- * This API allows to disable that feature for testing purposes.
- *
- * @param flushCacheOnDeallocate true if statement cache should be reset when "deallocate/discard" message observed
- */
- void setFlushCacheOnDeallocate(boolean flushCacheOnDeallocate);
+ /**
+ * By default, the connection resets statement cache in case deallocate all/discard all
+ * message is observed.
+ * This API allows to disable that feature for testing purposes.
+ *
+ * @param flushCacheOnDeallocate true if statement cache should be reset when "deallocate/discard" message observed
+ */
+ void setFlushCacheOnDeallocate(boolean flushCacheOnDeallocate);
- /**
- * Indicates if statements to backend should be hinted as read only.
- *
- * @return Indication if hints to backend (such as when transaction begins)
- * should be read only.
- * @see PGProperty#READ_ONLY_MODE
- */
- boolean hintReadOnly();
+ /**
+ * Indicates if statements to backend should be hinted as read only.
+ *
+ * @return Indication if hints to backend (such as when transaction begins)
+ * should be read only.
+ * @see PGProperty#READ_ONLY_MODE
+ */
+ boolean hintReadOnly();
- /**
- * Retrieve the factory to instantiate XML processing factories.
- *
- * @return The factory to use to instantiate XML processing factories
- * @throws SQLException if the class cannot be found or instantiated.
- */
- PGXmlFactoryFactory getXmlFactoryFactory() throws SQLException;
+ /**
+ * Retrieve the factory to instantiate XML processing factories.
+ *
+ * @return The factory to use to instantiate XML processing factories
+ * @throws SQLException if the class cannot be found or instantiated.
+ */
+ PGXmlFactoryFactory getXmlFactoryFactory() throws SQLException;
- /**
- * Indicates if error details from server used in included in logging and exceptions.
- *
- * @return true if should be included and passed on to other exceptions
- */
- boolean getLogServerErrorDetail();
+ /**
+ * Indicates if error details from server are included in logging and exceptions.
+ *
+ * @return true if should be included and passed on to other exceptions
+ */
+ boolean getLogServerErrorDetail();
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/BaseQueryKey.java b/pgjdbc/src/main/java/org/postgresql/core/BaseQueryKey.java
index d9d4aea..798295d 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/BaseQueryKey.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/BaseQueryKey.java
@@ -14,59 +14,59 @@ import org.postgresql.util.CanEstimateSize;
* as a cache key.
*/
class BaseQueryKey implements CanEstimateSize {
- public final String sql;
- public final boolean isParameterized;
- public final boolean escapeProcessing;
+ public final String sql;
+ public final boolean isParameterized;
+ public final boolean escapeProcessing;
- BaseQueryKey(String sql, boolean isParameterized, boolean escapeProcessing) {
- this.sql = sql;
- this.isParameterized = isParameterized;
- this.escapeProcessing = escapeProcessing;
- }
-
- @Override
- public String toString() {
- return "BaseQueryKey{"
- + "sql='" + sql + '\''
- + ", isParameterized=" + isParameterized
- + ", escapeProcessing=" + escapeProcessing
- + '}';
- }
-
- @Override
- public long getSize() {
- if (sql == null) { // just in case
- return 16;
- }
- return 16 + sql.length() * 2L; // 2 bytes per char, revise with Java 9's compact strings
- }
-
- @Override
- public boolean equals(Object o) {
- if (this == o) {
- return true;
- }
- if (o == null || getClass() != o.getClass()) {
- return false;
+ BaseQueryKey(String sql, boolean isParameterized, boolean escapeProcessing) {
+ this.sql = sql;
+ this.isParameterized = isParameterized;
+ this.escapeProcessing = escapeProcessing;
}
- BaseQueryKey that = (BaseQueryKey) o;
-
- if (isParameterized != that.isParameterized) {
- return false;
+ @Override
+ public String toString() {
+ return "BaseQueryKey{"
+ + "sql='" + sql + '\''
+ + ", isParameterized=" + isParameterized
+ + ", escapeProcessing=" + escapeProcessing
+ + '}';
}
- if (escapeProcessing != that.escapeProcessing) {
- return false;
+
+ @Override
+ public long getSize() {
+ if (sql == null) { // just in case
+ return 16;
+ }
+ return 16 + sql.length() * 2L; // 2 bytes per char, revise with Java 9's compact strings
}
- return sql != null ? sql.equals(that.sql) : that.sql == null;
- }
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
- @Override
- public int hashCode() {
- int result = sql != null ? sql.hashCode() : 0;
- result = 31 * result + (isParameterized ? 1 : 0);
- result = 31 * result + (escapeProcessing ? 1 : 0);
- return result;
- }
+ BaseQueryKey that = (BaseQueryKey) o;
+
+ if (isParameterized != that.isParameterized) {
+ return false;
+ }
+ if (escapeProcessing != that.escapeProcessing) {
+ return false;
+ }
+ return sql != null ? sql.equals(that.sql) : that.sql == null;
+
+ }
+
+ @Override
+ public int hashCode() {
+ int result = sql != null ? sql.hashCode() : 0;
+ result = 31 * result + (isParameterized ? 1 : 0);
+ result = 31 * result + (escapeProcessing ? 1 : 0);
+ return result;
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/BaseStatement.java b/pgjdbc/src/main/java/org/postgresql/core/BaseStatement.java
index d7f8a66..f171442 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/BaseStatement.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/BaseStatement.java
@@ -5,71 +5,70 @@
package org.postgresql.core;
-import org.postgresql.PGStatement;
-
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.List;
+import org.postgresql.PGStatement;
/**
* Driver-internal statement interface. Application code should not use this interface.
*/
public interface BaseStatement extends PGStatement, Statement {
- /**
- * Create a synthetic resultset from data provided by the driver.
- *
- * @param fields the column metadata for the resultset
- * @param tuples the resultset data
- * @return the new ResultSet
- * @throws SQLException if something goes wrong
- */
- ResultSet createDriverResultSet(Field[] fields, List tuples) throws SQLException;
+ /**
+ * Create a synthetic resultset from data provided by the driver.
+ *
+ * @param fields the column metadata for the resultset
+ * @param tuples the resultset data
+ * @return the new ResultSet
+ * @throws SQLException if something goes wrong
+ */
+ ResultSet createDriverResultSet(Field[] fields, List<Tuple> tuples) throws SQLException;
- /**
- * Create a resultset from data retrieved from the server.
- *
- * @param originalQuery the query that generated this resultset; used when dealing with updateable
- * resultsets
- * @param fields the column metadata for the resultset
- * @param tuples the resultset data
- * @param cursor the cursor to use to retrieve more data from the server; if null, no additional
- * data is present.
- * @return the new ResultSet
- * @throws SQLException if something goes wrong
- */
- ResultSet createResultSet(Query originalQuery, Field[] fields, List tuples,
- ResultCursor cursor) throws SQLException;
+ /**
+ * Create a resultset from data retrieved from the server.
+ *
+ * @param originalQuery the query that generated this resultset; used when dealing with updateable
+ * resultsets
+ * @param fields the column metadata for the resultset
+ * @param tuples the resultset data
+ * @param cursor the cursor to use to retrieve more data from the server; if null, no additional
+ * data is present.
+ * @return the new ResultSet
+ * @throws SQLException if something goes wrong
+ */
+ ResultSet createResultSet(Query originalQuery, Field[] fields, List<Tuple> tuples,
+ ResultCursor cursor) throws SQLException;
- /**
- * Execute a query, passing additional query flags.
- *
- * @param sql the query to execute (JDBC-style query)
- * @param flags additional {@link QueryExecutor} flags for execution; these are bitwise-ORed into
- * the default flags.
- * @return true if there is a result set
- * @throws SQLException if something goes wrong.
- */
- boolean executeWithFlags(String sql, int flags) throws SQLException;
+ /**
+ * Execute a query, passing additional query flags.
+ *
+ * @param sql the query to execute (JDBC-style query)
+ * @param flags additional {@link QueryExecutor} flags for execution; these are bitwise-ORed into
+ * the default flags.
+ * @return true if there is a result set
+ * @throws SQLException if something goes wrong.
+ */
+ boolean executeWithFlags(String sql, int flags) throws SQLException;
- /**
- * Execute a query, passing additional query flags.
- *
- * @param cachedQuery the query to execute (native to PostgreSQL)
- * @param flags additional {@link QueryExecutor} flags for execution; these are bitwise-ORed into
- * the default flags.
- * @return true if there is a result set
- * @throws SQLException if something goes wrong.
- */
- boolean executeWithFlags(CachedQuery cachedQuery, int flags) throws SQLException;
+ /**
+ * Execute a query, passing additional query flags.
+ *
+ * @param cachedQuery the query to execute (native to PostgreSQL)
+ * @param flags additional {@link QueryExecutor} flags for execution; these are bitwise-ORed into
+ * the default flags.
+ * @return true if there is a result set
+ * @throws SQLException if something goes wrong.
+ */
+ boolean executeWithFlags(CachedQuery cachedQuery, int flags) throws SQLException;
- /**
- * Execute a prepared query, passing additional query flags.
- *
- * @param flags additional {@link QueryExecutor} flags for execution; these are bitwise-ORed into
- * the default flags.
- * @return true if there is a result set
- * @throws SQLException if something goes wrong.
- */
- boolean executeWithFlags(int flags) throws SQLException;
+ /**
+ * Execute a prepared query, passing additional query flags.
+ *
+ * @param flags additional {@link QueryExecutor} flags for execution; these are bitwise-ORed into
+ * the default flags.
+ * @return true if there is a result set
+ * @throws SQLException if something goes wrong.
+ */
+ boolean executeWithFlags(int flags) throws SQLException;
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/CachedQuery.java b/pgjdbc/src/main/java/org/postgresql/core/CachedQuery.java
index 23ac4cd..3a50f10 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/CachedQuery.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/CachedQuery.java
@@ -12,64 +12,64 @@ import org.postgresql.util.CanEstimateSize;
* the same query through {@link java.sql.Connection#prepareStatement(String)}.
*/
public class CachedQuery implements CanEstimateSize {
- /**
- * Cache key. {@link String} or {@code org.postgresql.util.CanEstimateSize}.
- */
- public final Object key;
- public final Query query;
- public final boolean isFunction;
+ /**
+ * Cache key. {@link String} or {@code org.postgresql.util.CanEstimateSize}.
+ */
+ public final Object key;
+ public final Query query;
+ public final boolean isFunction;
- private int executeCount;
+ private int executeCount;
- public CachedQuery(Object key, Query query, boolean isFunction) {
- assert key instanceof String || key instanceof CanEstimateSize
- : "CachedQuery.key should either be String or implement CanEstimateSize."
- + " Actual class is " + key.getClass();
- this.key = key;
- this.query = query;
- this.isFunction = isFunction;
- }
-
- public void increaseExecuteCount() {
- if (executeCount < Integer.MAX_VALUE) {
- executeCount++;
+ public CachedQuery(Object key, Query query, boolean isFunction) {
+ assert key instanceof String || key instanceof CanEstimateSize
+ : "CachedQuery.key should either be String or implement CanEstimateSize."
+ + " Actual class is " + key.getClass();
+ this.key = key;
+ this.query = query;
+ this.isFunction = isFunction;
}
- }
- public void increaseExecuteCount(int inc) {
- int newValue = executeCount + inc;
- if (newValue > 0) { // if overflows, just ignore the update
- executeCount = newValue;
+ public void increaseExecuteCount() {
+ if (executeCount < Integer.MAX_VALUE) {
+ executeCount++;
+ }
}
- }
- /**
- * Number of times this statement has been used.
- *
- * @return number of times this statement has been used
- */
- public int getExecuteCount() {
- return executeCount;
- }
-
- @Override
- public long getSize() {
- long queryLength;
- if (key instanceof String) {
- queryLength = ((String) key).length() * 2L; // 2 bytes per char, revise with Java 9's compact strings
- } else {
- queryLength = ((CanEstimateSize) key).getSize();
+ public void increaseExecuteCount(int inc) {
+ int newValue = executeCount + inc;
+ if (newValue > 0) { // if overflows, just ignore the update
+ executeCount = newValue;
+ }
}
- return queryLength * 2 /* original query and native sql */
- + 100L /* entry in hash map, CachedQuery wrapper, etc */;
- }
- @Override
- public String toString() {
- return "CachedQuery{"
- + "executeCount=" + executeCount
- + ", query=" + query
- + ", isFunction=" + isFunction
- + '}';
- }
+ /**
+ * Number of times this statement has been used.
+ *
+ * @return number of times this statement has been used
+ */
+ public int getExecuteCount() {
+ return executeCount;
+ }
+
+ @Override
+ public long getSize() {
+ long queryLength;
+ if (key instanceof String) {
+ queryLength = ((String) key).length() * 2L; // 2 bytes per char, revise with Java 9's compact strings
+ } else {
+ queryLength = ((CanEstimateSize) key).getSize();
+ }
+ return queryLength * 2 /* original query and native sql */
+ + 100L /* entry in hash map, CachedQuery wrapper, etc */;
+ }
+
+ @Override
+ public String toString() {
+ return "CachedQuery{"
+ + "executeCount=" + executeCount
+ + ", query=" + query
+ + ", isFunction=" + isFunction
+ + '}';
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/CachedQueryCreateAction.java b/pgjdbc/src/main/java/org/postgresql/core/CachedQueryCreateAction.java
index 90af15d..c1181a7 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/CachedQueryCreateAction.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/CachedQueryCreateAction.java
@@ -5,68 +5,67 @@
package org.postgresql.core;
-import org.postgresql.jdbc.PreferQueryMode;
-import org.postgresql.util.LruCache;
-
import java.sql.SQLException;
import java.util.List;
+import org.postgresql.jdbc.PreferQueryMode;
+import org.postgresql.util.LruCache;
/**
* Creates an instance of {@link CachedQuery} for a given connection.
*/
class CachedQueryCreateAction implements LruCache.CreateAction
*/
public class ResultHandlerDelegate implements ResultHandler {
- private final ResultHandler delegate;
+ private final ResultHandler delegate;
- public ResultHandlerDelegate(ResultHandler delegate) {
- this.delegate = delegate;
- }
-
- @Override
- public void handleResultRows(Query fromQuery, Field[] fields, List tuples,
- ResultCursor cursor) {
- if (delegate != null) {
- delegate.handleResultRows(fromQuery, fields, tuples, cursor);
+ public ResultHandlerDelegate(ResultHandler delegate) {
+ this.delegate = delegate;
}
- }
- @Override
- public void handleCommandStatus(String status, long updateCount, long insertOID) {
- if (delegate != null) {
- delegate.handleCommandStatus(status, updateCount, insertOID);
+ @Override
+ public void handleResultRows(Query fromQuery, Field[] fields, List tuples,
+ ResultCursor cursor) {
+ if (delegate != null) {
+ delegate.handleResultRows(fromQuery, fields, tuples, cursor);
+ }
}
- }
- @Override
- public void handleWarning(SQLWarning warning) {
- if (delegate != null) {
- delegate.handleWarning(warning);
+ @Override
+ public void handleCommandStatus(String status, long updateCount, long insertOID) {
+ if (delegate != null) {
+ delegate.handleCommandStatus(status, updateCount, insertOID);
+ }
}
- }
- @Override
- public void handleError(SQLException error) {
- if (delegate != null) {
- delegate.handleError(error);
+ @Override
+ public void handleWarning(SQLWarning warning) {
+ if (delegate != null) {
+ delegate.handleWarning(warning);
+ }
}
- }
- @Override
- public void handleCompletion() throws SQLException {
- if (delegate != null) {
- delegate.handleCompletion();
+ @Override
+ public void handleError(SQLException error) {
+ if (delegate != null) {
+ delegate.handleError(error);
+ }
}
- }
- @Override
- public void secureProgress() {
- if (delegate != null) {
- delegate.secureProgress();
+ @Override
+ public void handleCompletion() throws SQLException {
+ if (delegate != null) {
+ delegate.handleCompletion();
+ }
}
- }
- @Override
- public SQLException getException() {
- if (delegate != null) {
- return delegate.getException();
+ @Override
+ public void secureProgress() {
+ if (delegate != null) {
+ delegate.secureProgress();
+ }
}
- return null;
- }
- @Override
- public SQLWarning getWarning() {
- if (delegate != null) {
- return delegate.getWarning();
+ @Override
+ public SQLException getException() {
+ if (delegate != null) {
+ return delegate.getException();
+ }
+ return null;
+ }
+
+ @Override
+ public SQLWarning getWarning() {
+ if (delegate != null) {
+ return delegate.getWarning();
+ }
+ return null;
}
- return null;
- }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/ServerVersion.java b/pgjdbc/src/main/java/org/postgresql/core/ServerVersion.java
index d8ec12a..5f41bd4 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/ServerVersion.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/ServerVersion.java
@@ -13,136 +13,125 @@ import java.text.ParsePosition;
*/
public enum ServerVersion implements Version {
- INVALID("0.0.0"),
- v8_2("8.2.0"),
- v8_3("8.3.0"),
- v8_4("8.4.0"),
- v9_0("9.0.0"),
- v9_1("9.1.0"),
- v9_2("9.2.0"),
- v9_3("9.3.0"),
- v9_4("9.4.0"),
- v9_5("9.5.0"),
- v9_6("9.6.0"),
- v10("10"),
- v11("11"),
- v12("12"),
- v13("13"),
- v14("14"),
- v15("15"),
- v16("16")
- ;
+ INVALID("0.0.0"),
+ v8_2("8.2.0"),
+ v8_3("8.3.0"),
+ v8_4("8.4.0"),
+ v9_0("9.0.0"),
+ v9_1("9.1.0"),
+ v9_2("9.2.0"),
+ v9_3("9.3.0"),
+ v9_4("9.4.0"),
+ v9_5("9.5.0"),
+ v9_6("9.6.0"),
+ v10("10"),
+ v11("11"),
+ v12("12"),
+ v13("13"),
+ v14("14"),
+ v15("15"),
+ v16("16");
- private final int version;
+ private final int version;
- ServerVersion(String version) {
- this.version = parseServerVersionStr(version);
- }
+ ServerVersion(String version) {
+ this.version = parseServerVersionStr(version);
+ }
- /**
- * Get a machine-readable version number.
- *
- * @return the version in numeric XXYYZZ form, e.g. 90401 for 9.4.1
- */
- @Override
- public int getVersionNum() {
- return version;
- }
+ /**
+ *
Attempt to parse the server version string into an XXYYZZ form version number into a
+ * {@link Version}.
+ *
+ *
If the specified version cannot be parsed, the {@link Version#getVersionNum()} will return 0.
+ *
+ * @param version version in numeric XXYYZZ form, e.g. "090401" for 9.4.1
+ * @return a {@link Version} representing the specified version string.
+ */
+ public static Version from(String version) {
+ final int versionNum = parseServerVersionStr(version);
+ return new Version() {
+ @Override
+ public int getVersionNum() {
+ return versionNum;
+ }
- /**
- *
Attempt to parse the server version string into an XXYYZZ form version number into a
- * {@link Version}.
- *
- *
If the specified version cannot be parsed, the {@link Version#getVersionNum()} will return 0.
- *
- * @param version version in numeric XXYYZZ form, e.g. "090401" for 9.4.1
- * @return a {@link Version} representing the specified version string.
- */
- public static Version from(String version) {
- final int versionNum = parseServerVersionStr(version);
- return new Version() {
- @Override
- public int getVersionNum() {
- return versionNum;
- }
+ @Override
+ public boolean equals(Object obj) {
+ if (obj instanceof Version) {
+ return this.getVersionNum() == ((Version) obj).getVersionNum();
+ }
+ return false;
+ }
- @Override
- public boolean equals(Object obj) {
- if (obj instanceof Version) {
- return this.getVersionNum() == ((Version) obj).getVersionNum();
+ @Override
+ public int hashCode() {
+ return getVersionNum();
+ }
+
+ @Override
+ public String toString() {
+ return Integer.toString(versionNum);
+ }
+ };
+ }
+
+ /**
+ *
Attempt to parse the server version string into an XXYYZZ form version number.
+ *
+ *
Returns 0 if the version could not be parsed.
+ *
+ *
Returns minor version 0 if the minor version could not be determined, e.g. devel or beta
+ * releases.
+ *
+ *
If a single major part like 90400 is passed, it's assumed to be a pre-parsed version and
+ * returned verbatim. (Anything equal to or greater than 10000 is presumed to be this form).
+ *
+ *
The yy or zz version parts may be larger than 99. A NumberFormatException is thrown if a
+ * version part is out of range.
+ *
+ * @param serverVersion server version in a XXYYZZ form
+ * @return server version in number form
+ */
+ static int parseServerVersionStr(String serverVersion) throws NumberFormatException {
+ if (serverVersion == null) {
+ return 0;
}
- return false;
- }
- @Override
- public int hashCode() {
- return getVersionNum();
- }
+ NumberFormat numformat = NumberFormat.getIntegerInstance();
+ numformat.setGroupingUsed(false);
+ ParsePosition parsepos = new ParsePosition(0);
- @Override
- public String toString() {
- return Integer.toString(versionNum);
- }
- };
- }
+ int[] parts = new int[3];
+ int versionParts;
+ for (versionParts = 0; versionParts < 3; versionParts++) {
+ Number part = (Number) numformat.parseObject(serverVersion, parsepos);
+ if (part == null) {
+ break;
+ }
+ parts[versionParts] = part.intValue();
+ if (parsepos.getIndex() == serverVersion.length()
+ || serverVersion.charAt(parsepos.getIndex()) != '.') {
+ break;
+ }
+ // Skip .
+ parsepos.setIndex(parsepos.getIndex() + 1);
+ }
+ versionParts++;
- /**
- *
Attempt to parse the server version string into an XXYYZZ form version number.
- *
- *
Returns 0 if the version could not be parsed.
- *
- *
Returns minor version 0 if the minor version could not be determined, e.g. devel or beta
- * releases.
- *
- *
If a single major part like 90400 is passed, it's assumed to be a pre-parsed version and
- * returned verbatim. (Anything equal to or greater than 10000 is presumed to be this form).
- *
- *
The yy or zz version parts may be larger than 99. A NumberFormatException is thrown if a
- * version part is out of range.
- *
- * @param serverVersion server version in a XXYYZZ form
- * @return server version in number form
- */
- static int parseServerVersionStr(String serverVersion) throws NumberFormatException {
- if (serverVersion == null) {
- return 0;
- }
-
- NumberFormat numformat = NumberFormat.getIntegerInstance();
- numformat.setGroupingUsed(false);
- ParsePosition parsepos = new ParsePosition(0);
-
- int[] parts = new int[3];
- int versionParts;
- for (versionParts = 0; versionParts < 3; versionParts++) {
- Number part = (Number) numformat.parseObject(serverVersion, parsepos);
- if (part == null) {
- break;
- }
- parts[versionParts] = part.intValue();
- if (parsepos.getIndex() == serverVersion.length()
- || serverVersion.charAt(parsepos.getIndex()) != '.') {
- break;
- }
- // Skip .
- parsepos.setIndex(parsepos.getIndex() + 1);
- }
- versionParts++;
-
- if (parts[0] >= 10000) {
- /*
- * PostgreSQL version 1000? I don't think so. We're seeing a version like 90401; return it
- * verbatim, but only if there's nothing else in the version. If there is, treat it as a parse
- * error.
- */
- if (parsepos.getIndex() == serverVersion.length() && versionParts == 1) {
- return parts[0];
- } else {
- throw new NumberFormatException(
- "First major-version part equal to or greater than 10000 in invalid version string: "
- + serverVersion);
- }
- }
+ if (parts[0] >= 10000) {
+ /*
+ * PostgreSQL version 1000? I don't think so. We're seeing a version like 90401; return it
+ * verbatim, but only if there's nothing else in the version. If there is, treat it as a parse
+ * error.
+ */
+ if (parsepos.getIndex() == serverVersion.length() && versionParts == 1) {
+ return parts[0];
+ } else {
+ throw new NumberFormatException(
+ "First major-version part equal to or greater than 10000 in invalid version string: "
+ + serverVersion);
+ }
+ }
/* #667 - Allow for versions with greater than 3 parts.
For versions with more than 3 parts, still return 3 parts (4th part ignored for now
@@ -150,36 +139,46 @@ public enum ServerVersion implements Version {
Allows for future versions of the server to utilize more than 3 part version numbers
without upgrading the jdbc driver */
- if (versionParts >= 3) {
- if (parts[1] > 99) {
- throw new NumberFormatException(
- "Unsupported second part of major version > 99 in invalid version string: "
- + serverVersion);
- }
- if (parts[2] > 99) {
- throw new NumberFormatException(
- "Unsupported second part of minor version > 99 in invalid version string: "
- + serverVersion);
- }
- return (parts[0] * 100 + parts[1]) * 100 + parts[2];
+ if (versionParts >= 3) {
+ if (parts[1] > 99) {
+ throw new NumberFormatException(
+ "Unsupported second part of major version > 99 in invalid version string: "
+ + serverVersion);
+ }
+ if (parts[2] > 99) {
+ throw new NumberFormatException(
+ "Unsupported second part of minor version > 99 in invalid version string: "
+ + serverVersion);
+ }
+ return (parts[0] * 100 + parts[1]) * 100 + parts[2];
+ }
+ if (versionParts == 2) {
+ if (parts[0] >= 10) {
+ return parts[0] * 100 * 100 + parts[1];
+ }
+ if (parts[1] > 99) {
+ throw new NumberFormatException(
+ "Unsupported second part of major version > 99 in invalid version string: "
+ + serverVersion);
+ }
+ return (parts[0] * 100 + parts[1]) * 100;
+ }
+ if (versionParts == 1) {
+ if (parts[0] >= 10) {
+ return parts[0] * 100 * 100;
+ }
+ }
+ return 0; /* unknown */
}
- if (versionParts == 2) {
- if (parts[0] >= 10) {
- return parts[0] * 100 * 100 + parts[1];
- }
- if (parts[1] > 99) {
- throw new NumberFormatException(
- "Unsupported second part of major version > 99 in invalid version string: "
- + serverVersion);
- }
- return (parts[0] * 100 + parts[1]) * 100;
+
+ /**
+ * Get a machine-readable version number.
+ *
+ * @return the version in numeric XXYYZZ form, e.g. 90401 for 9.4.1
+ */
+ @Override
+ public int getVersionNum() {
+ return version;
}
- if (versionParts == 1) {
- if (parts[0] >= 10) {
- return parts[0] * 100 * 100;
- }
- }
- return 0; /* unknown */
- }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/SetupQueryRunner.java b/pgjdbc/src/main/java/org/postgresql/core/SetupQueryRunner.java
index 739043e..4559586 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/SetupQueryRunner.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/SetupQueryRunner.java
@@ -6,13 +6,12 @@
package org.postgresql.core;
-import org.postgresql.util.GT;
-import org.postgresql.util.PSQLException;
-import org.postgresql.util.PSQLState;
-
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.util.List;
+import org.postgresql.util.GT;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
/**
* Poor man's Statement & ResultSet, used for initial queries while we're still initializing the
@@ -20,57 +19,57 @@ import java.util.List;
*/
public class SetupQueryRunner {
- public SetupQueryRunner() {
- }
-
- private static class SimpleResultHandler extends ResultHandlerBase {
- private List tuples;
-
- List getResults() {
- return tuples;
+ public SetupQueryRunner() {
}
- @Override
- public void handleResultRows(Query fromQuery, Field[] fields, List tuples,
- ResultCursor cursor) {
- this.tuples = tuples;
+ public static Tuple run(QueryExecutor executor, String queryString,
+ boolean wantResults) throws SQLException {
+ Query query = executor.createSimpleQuery(queryString);
+ SimpleResultHandler handler = new SimpleResultHandler();
+
+ int flags = QueryExecutor.QUERY_ONESHOT | QueryExecutor.QUERY_SUPPRESS_BEGIN
+ | QueryExecutor.QUERY_EXECUTE_AS_SIMPLE;
+ if (!wantResults) {
+ flags |= QueryExecutor.QUERY_NO_RESULTS | QueryExecutor.QUERY_NO_METADATA;
+ }
+
+ try {
+ executor.execute(query, null, handler, 0, 0, flags);
+ } finally {
+ query.close();
+ }
+
+ if (!wantResults) {
+ return null;
+ }
+
+ List tuples = handler.getResults();
+ if (tuples == null || tuples.size() != 1) {
+ throw new PSQLException(GT.tr("An unexpected result was returned by a query."),
+ PSQLState.CONNECTION_UNABLE_TO_CONNECT);
+ }
+
+ return tuples.get(0);
}
- @Override
- public void handleWarning(SQLWarning warning) {
- // We ignore warnings. We assume we know what we're
- // doing in the setup queries.
+ private static class SimpleResultHandler extends ResultHandlerBase {
+ private List tuples;
+
+ List getResults() {
+ return tuples;
+ }
+
+ @Override
+ public void handleResultRows(Query fromQuery, Field[] fields, List tuples,
+ ResultCursor cursor) {
+ this.tuples = tuples;
+ }
+
+ @Override
+ public void handleWarning(SQLWarning warning) {
+ // We ignore warnings. We assume we know what we're
+ // doing in the setup queries.
+ }
}
- }
-
- public static Tuple run(QueryExecutor executor, String queryString,
- boolean wantResults) throws SQLException {
- Query query = executor.createSimpleQuery(queryString);
- SimpleResultHandler handler = new SimpleResultHandler();
-
- int flags = QueryExecutor.QUERY_ONESHOT | QueryExecutor.QUERY_SUPPRESS_BEGIN
- | QueryExecutor.QUERY_EXECUTE_AS_SIMPLE;
- if (!wantResults) {
- flags |= QueryExecutor.QUERY_NO_RESULTS | QueryExecutor.QUERY_NO_METADATA;
- }
-
- try {
- executor.execute(query, null, handler, 0, 0, flags);
- } finally {
- query.close();
- }
-
- if (!wantResults) {
- return null;
- }
-
- List tuples = handler.getResults();
- if (tuples == null || tuples.size() != 1) {
- throw new PSQLException(GT.tr("An unexpected result was returned by a query."),
- PSQLState.CONNECTION_UNABLE_TO_CONNECT);
- }
-
- return tuples.get(0);
- }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/SocketFactoryFactory.java b/pgjdbc/src/main/java/org/postgresql/core/SocketFactoryFactory.java
index f54fb00..2d786f0 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/SocketFactoryFactory.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/SocketFactoryFactory.java
@@ -5,6 +5,9 @@
package org.postgresql.core;
+import java.util.Properties;
+import javax.net.SocketFactory;
+import javax.net.ssl.SSLSocketFactory;
import org.postgresql.PGProperty;
import org.postgresql.ssl.LibPQFactory;
import org.postgresql.util.GT;
@@ -12,66 +15,61 @@ import org.postgresql.util.ObjectFactory;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
-import java.util.Properties;
-
-import javax.net.SocketFactory;
-import javax.net.ssl.SSLSocketFactory;
-
/**
* Instantiates {@link SocketFactory} based on the {@link PGProperty#SOCKET_FACTORY}.
*/
public class SocketFactoryFactory {
- public SocketFactoryFactory() {
- }
+ public SocketFactoryFactory() {
+ }
- /**
- * Instantiates {@link SocketFactory} based on the {@link PGProperty#SOCKET_FACTORY}.
- *
- * @param info connection properties
- * @return socket factory
- * @throws PSQLException if something goes wrong
- */
- public static SocketFactory getSocketFactory(Properties info) throws PSQLException {
- // Socket factory
- String socketFactoryClassName = PGProperty.SOCKET_FACTORY.getOrDefault(info);
- if (socketFactoryClassName == null) {
- return SocketFactory.getDefault();
+ /**
+ * Instantiates {@link SocketFactory} based on the {@link PGProperty#SOCKET_FACTORY}.
+ *
+ * @param info connection properties
+ * @return socket factory
+ * @throws PSQLException if something goes wrong
+ */
+ public static SocketFactory getSocketFactory(Properties info) throws PSQLException {
+ // Socket factory
+ String socketFactoryClassName = PGProperty.SOCKET_FACTORY.getOrDefault(info);
+ if (socketFactoryClassName == null) {
+ return SocketFactory.getDefault();
+ }
+ try {
+ return ObjectFactory.instantiate(SocketFactory.class, socketFactoryClassName, info, true,
+ PGProperty.SOCKET_FACTORY_ARG.getOrDefault(info));
+ } catch (Exception e) {
+ throw new PSQLException(
+ GT.tr("The SocketFactory class provided {0} could not be instantiated.",
+ socketFactoryClassName),
+ PSQLState.CONNECTION_FAILURE, e);
+ }
}
- try {
- return ObjectFactory.instantiate(SocketFactory.class, socketFactoryClassName, info, true,
- PGProperty.SOCKET_FACTORY_ARG.getOrDefault(info));
- } catch (Exception e) {
- throw new PSQLException(
- GT.tr("The SocketFactory class provided {0} could not be instantiated.",
- socketFactoryClassName),
- PSQLState.CONNECTION_FAILURE, e);
- }
- }
- /**
- * Instantiates {@link SSLSocketFactory} based on the {@link PGProperty#SSL_FACTORY}.
- *
- * @param info connection properties
- * @return SSL socket factory
- * @throws PSQLException if something goes wrong
- */
- @SuppressWarnings("deprecation")
- public static SSLSocketFactory getSslSocketFactory(Properties info) throws PSQLException {
- String classname = PGProperty.SSL_FACTORY.getOrDefault(info);
- if (classname == null
- || "org.postgresql.ssl.jdbc4.LibPQFactory".equals(classname)
- || "org.postgresql.ssl.LibPQFactory".equals(classname)) {
- return new LibPQFactory(info);
+ /**
+ * Instantiates {@link SSLSocketFactory} based on the {@link PGProperty#SSL_FACTORY}.
+ *
+ * @param info connection properties
+ * @return SSL socket factory
+ * @throws PSQLException if something goes wrong
+ */
+ @SuppressWarnings("deprecation")
+ public static SSLSocketFactory getSslSocketFactory(Properties info) throws PSQLException {
+ String classname = PGProperty.SSL_FACTORY.getOrDefault(info);
+ if (classname == null
+ || "org.postgresql.ssl.jdbc4.LibPQFactory".equals(classname)
+ || "org.postgresql.ssl.LibPQFactory".equals(classname)) {
+ return new LibPQFactory(info);
+ }
+ try {
+ return ObjectFactory.instantiate(SSLSocketFactory.class, classname, info, true,
+ PGProperty.SSL_FACTORY_ARG.getOrDefault(info));
+ } catch (Exception e) {
+ throw new PSQLException(
+ GT.tr("The SSLSocketFactory class provided {0} could not be instantiated.", classname),
+ PSQLState.CONNECTION_FAILURE, e);
+ }
}
- try {
- return ObjectFactory.instantiate(SSLSocketFactory.class, classname, info, true,
- PGProperty.SSL_FACTORY_ARG.getOrDefault(info));
- } catch (Exception e) {
- throw new PSQLException(
- GT.tr("The SSLSocketFactory class provided {0} could not be instantiated.", classname),
- PSQLState.CONNECTION_FAILURE, e);
- }
- }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/SqlCommand.java b/pgjdbc/src/main/java/org/postgresql/core/SqlCommand.java
index 90201fa..3f6d542 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/SqlCommand.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/SqlCommand.java
@@ -14,68 +14,66 @@ import static org.postgresql.core.SqlCommandType.WITH;
*
* @author Jeremy Whiting jwhiting@redhat.com
* @author Christopher Deckers (chrriis@gmail.com)
- *
*/
public class SqlCommand {
- public static final SqlCommand BLANK = SqlCommand.createStatementTypeInfo(SqlCommandType.BLANK);
+ public static final SqlCommand BLANK = SqlCommand.createStatementTypeInfo(SqlCommandType.BLANK);
+ private final SqlCommandType commandType;
+ private final boolean parsedSQLhasRETURNINGKeyword;
+ private final int valuesBraceOpenPosition;
+ private final int valuesBraceClosePosition;
- public boolean isBatchedReWriteCompatible() {
- return valuesBraceOpenPosition >= 0;
- }
+ private SqlCommand(SqlCommandType type, boolean isBatchedReWriteConfigured,
+ int valuesBraceOpenPosition, int valuesBraceClosePosition, boolean isPresent,
+ int priorQueryCount) {
+ commandType = type;
+ parsedSQLhasRETURNINGKeyword = isPresent;
+ boolean batchedReWriteCompatible = (type == INSERT) && isBatchedReWriteConfigured
+ && valuesBraceOpenPosition >= 0 && valuesBraceClosePosition > valuesBraceOpenPosition
+ && !isPresent && priorQueryCount == 0;
+ this.valuesBraceOpenPosition = batchedReWriteCompatible ? valuesBraceOpenPosition : -1;
+ this.valuesBraceClosePosition = batchedReWriteCompatible ? valuesBraceClosePosition : -1;
+ }
- public int getBatchRewriteValuesBraceOpenPosition() {
- return valuesBraceOpenPosition;
- }
+ public static SqlCommand createStatementTypeInfo(SqlCommandType type,
+ boolean isBatchedReWritePropertyConfigured,
+ int valuesBraceOpenPosition, int valuesBraceClosePosition, boolean isRETURNINGkeywordPresent,
+ int priorQueryCount) {
+ return new SqlCommand(type, isBatchedReWritePropertyConfigured,
+ valuesBraceOpenPosition, valuesBraceClosePosition, isRETURNINGkeywordPresent,
+ priorQueryCount);
+ }
- public int getBatchRewriteValuesBraceClosePosition() {
- return valuesBraceClosePosition;
- }
+ public static SqlCommand createStatementTypeInfo(SqlCommandType type) {
+ return new SqlCommand(type, false, -1, -1, false, 0);
+ }
- public SqlCommandType getType() {
- return commandType;
- }
+ public static SqlCommand createStatementTypeInfo(SqlCommandType type,
+ boolean isRETURNINGkeywordPresent) {
+ return new SqlCommand(type, false, -1, -1, isRETURNINGkeywordPresent, 0);
+ }
- public boolean isReturningKeywordPresent() {
- return parsedSQLhasRETURNINGKeyword;
- }
+ public boolean isBatchedReWriteCompatible() {
+ return valuesBraceOpenPosition >= 0;
+ }
- public boolean returnsRows() {
- return parsedSQLhasRETURNINGKeyword || commandType == SELECT || commandType == WITH;
- }
+ public int getBatchRewriteValuesBraceOpenPosition() {
+ return valuesBraceOpenPosition;
+ }
- public static SqlCommand createStatementTypeInfo(SqlCommandType type,
- boolean isBatchedReWritePropertyConfigured,
- int valuesBraceOpenPosition, int valuesBraceClosePosition, boolean isRETURNINGkeywordPresent,
- int priorQueryCount) {
- return new SqlCommand(type, isBatchedReWritePropertyConfigured,
- valuesBraceOpenPosition, valuesBraceClosePosition, isRETURNINGkeywordPresent,
- priorQueryCount);
- }
+ public int getBatchRewriteValuesBraceClosePosition() {
+ return valuesBraceClosePosition;
+ }
- public static SqlCommand createStatementTypeInfo(SqlCommandType type) {
- return new SqlCommand(type, false, -1, -1, false, 0);
- }
+ public SqlCommandType getType() {
+ return commandType;
+ }
- public static SqlCommand createStatementTypeInfo(SqlCommandType type,
- boolean isRETURNINGkeywordPresent) {
- return new SqlCommand(type, false, -1, -1, isRETURNINGkeywordPresent, 0);
- }
+ public boolean isReturningKeywordPresent() {
+ return parsedSQLhasRETURNINGKeyword;
+ }
- private SqlCommand(SqlCommandType type, boolean isBatchedReWriteConfigured,
- int valuesBraceOpenPosition, int valuesBraceClosePosition, boolean isPresent,
- int priorQueryCount) {
- commandType = type;
- parsedSQLhasRETURNINGKeyword = isPresent;
- boolean batchedReWriteCompatible = (type == INSERT) && isBatchedReWriteConfigured
- && valuesBraceOpenPosition >= 0 && valuesBraceClosePosition > valuesBraceOpenPosition
- && !isPresent && priorQueryCount == 0;
- this.valuesBraceOpenPosition = batchedReWriteCompatible ? valuesBraceOpenPosition : -1;
- this.valuesBraceClosePosition = batchedReWriteCompatible ? valuesBraceClosePosition : -1;
- }
-
- private final SqlCommandType commandType;
- private final boolean parsedSQLhasRETURNINGKeyword;
- private final int valuesBraceOpenPosition;
- private final int valuesBraceClosePosition;
+ public boolean returnsRows() {
+ return parsedSQLhasRETURNINGKeyword || commandType == SELECT || commandType == WITH;
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/SqlCommandType.java b/pgjdbc/src/main/java/org/postgresql/core/SqlCommandType.java
index 3a4fc43..d306d87 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/SqlCommandType.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/SqlCommandType.java
@@ -7,23 +7,23 @@ package org.postgresql.core;
/**
* Type information inspection support.
- * @author Jeremy Whiting jwhiting@redhat.com
*
+ * @author Jeremy Whiting jwhiting@redhat.com
*/
public enum SqlCommandType {
- /**
- * Use BLANK for empty sql queries or when parsing the sql string is not
- * necessary.
- */
- BLANK,
- INSERT,
- UPDATE,
- DELETE,
- MOVE,
- SELECT,
- WITH,
- CREATE,
- ALTER
+ /**
+ * Use BLANK for empty sql queries or when parsing the sql string is not
+ * necessary.
+ */
+ BLANK,
+ INSERT,
+ UPDATE,
+ DELETE,
+ MOVE,
+ SELECT,
+ WITH,
+ CREATE,
+ ALTER
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/TransactionState.java b/pgjdbc/src/main/java/org/postgresql/core/TransactionState.java
index b819026..14c53a6 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/TransactionState.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/TransactionState.java
@@ -6,7 +6,7 @@
package org.postgresql.core;
public enum TransactionState {
- IDLE,
- OPEN,
- FAILED
+ IDLE,
+ OPEN,
+ FAILED
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/Tuple.java b/pgjdbc/src/main/java/org/postgresql/core/Tuple.java
index 5f6e488..852e170 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/Tuple.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/Tuple.java
@@ -9,92 +9,100 @@ package org.postgresql.core;
* Class representing a row in a {@link java.sql.ResultSet}.
*/
public class Tuple {
- private final boolean forUpdate;
- final byte[] [] data;
+ final byte[][] data;
+ private final boolean forUpdate;
- /**
- * Construct an empty tuple. Used in updatable result sets.
- * @param length the number of fields in the tuple.
- */
- public Tuple(int length) {
- this(new byte[length][], true);
- }
-
- /**
- * Construct a populated tuple. Used when returning results.
- * @param data the tuple data
- */
- public Tuple(byte[] [] data) {
- this(data, false);
- }
-
- private Tuple(byte[] [] data, boolean forUpdate) {
- this.data = data;
- this.forUpdate = forUpdate;
- }
-
- /**
- * Number of fields in the tuple
- * @return number of fields
- */
- public int fieldCount() {
- return data.length;
- }
-
- /**
- * Total length in bytes of the tuple data.
- * @return the number of bytes in this tuple
- */
- public int length() {
- int length = 0;
- for (byte[] field : data) {
- if (field != null) {
- length += field.length;
- }
+ /**
+ * Construct an empty tuple. Used in updatable result sets.
+ *
+ * @param length the number of fields in the tuple.
+ */
+ public Tuple(int length) {
+ this(new byte[length][], true);
}
- return length;
- }
- /**
- * Get the data for the given field
- * @param index 0-based field position in the tuple
- * @return byte array of the data
- */
- public byte [] get(int index) {
- return data[index];
- }
-
- /**
- * Create a copy of the tuple for updating.
- * @return a copy of the tuple that allows updates
- */
- public Tuple updateableCopy() {
- return copy(true);
- }
-
- /**
- * Create a read-only copy of the tuple
- * @return a copy of the tuple that does not allow updates
- */
- public Tuple readOnlyCopy() {
- return copy(false);
- }
-
- private Tuple copy(boolean forUpdate) {
- byte[][] dataCopy = new byte[data.length][];
- System.arraycopy(data, 0, dataCopy, 0, data.length);
- return new Tuple(dataCopy, forUpdate);
- }
-
- /**
- * Set the given field to the given data.
- * @param index 0-based field position
- * @param fieldData the data to set
- */
- public void set(int index, byte [] fieldData) {
- if (!forUpdate) {
- throw new IllegalArgumentException("Attempted to write to readonly tuple");
+ /**
+ * Construct a populated tuple. Used when returning results.
+ *
+ * @param data the tuple data
+ */
+ public Tuple(byte[][] data) {
+ this(data, false);
+ }
+
+ private Tuple(byte[][] data, boolean forUpdate) {
+ this.data = data;
+ this.forUpdate = forUpdate;
+ }
+
+ /**
+ * Number of fields in the tuple
+ *
+ * @return number of fields
+ */
+ public int fieldCount() {
+ return data.length;
+ }
+
+ /**
+ * Total length in bytes of the tuple data.
+ *
+ * @return the number of bytes in this tuple
+ */
+ public int length() {
+ int length = 0;
+ for (byte[] field : data) {
+ if (field != null) {
+ length += field.length;
+ }
+ }
+ return length;
+ }
+
+ /**
+ * Get the data for the given field
+ *
+ * @param index 0-based field position in the tuple
+ * @return byte array of the data
+ */
+ public byte[] get(int index) {
+ return data[index];
+ }
+
+ /**
+ * Create a copy of the tuple for updating.
+ *
+ * @return a copy of the tuple that allows updates
+ */
+ public Tuple updateableCopy() {
+ return copy(true);
+ }
+
+ /**
+ * Create a read-only copy of the tuple
+ *
+ * @return a copy of the tuple that does not allow updates
+ */
+ public Tuple readOnlyCopy() {
+ return copy(false);
+ }
+
+ private Tuple copy(boolean forUpdate) {
+ byte[][] dataCopy = new byte[data.length][];
+ System.arraycopy(data, 0, dataCopy, 0, data.length);
+ return new Tuple(dataCopy, forUpdate);
+ }
+
+ /**
+ * Set the given field to the given data.
+ *
+ * @param index 0-based field position
+ * @param fieldData the data to set
+ */
+ public void set(int index, byte[] fieldData) {
+ if (!forUpdate) {
+ throw new IllegalArgumentException("Attempted to write to readonly tuple");
+ }
+ data[index] = fieldData;
}
- data[index] = fieldData;
- }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/TypeInfo.java b/pgjdbc/src/main/java/org/postgresql/core/TypeInfo.java
index f41b407..2d23ac2 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/TypeInfo.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/TypeInfo.java
@@ -5,141 +5,140 @@
package org.postgresql.core;
-import org.postgresql.util.PGobject;
-
import java.sql.SQLException;
import java.util.Iterator;
+import org.postgresql.util.PGobject;
public interface TypeInfo {
- void addCoreType(String pgTypeName, Integer oid, Integer sqlType, String javaClass,
- Integer arrayOid);
+ void addCoreType(String pgTypeName, Integer oid, Integer sqlType, String javaClass,
+ Integer arrayOid);
- void addDataType(String type, Class<? extends PGobject> klass) throws SQLException;
+ void addDataType(String type, Class<? extends PGobject> klass) throws SQLException;
- /**
- * Look up the SQL typecode for a given type oid.
- *
- * @param oid the type's OID
- * @return the SQL type code (a constant from {@link java.sql.Types}) for the type
- * @throws SQLException if an error occurs when retrieving sql type
- */
- int getSQLType(int oid) throws SQLException;
+ /**
+ * Look up the SQL typecode for a given type oid.
+ *
+ * @param oid the type's OID
+ * @return the SQL type code (a constant from {@link java.sql.Types}) for the type
+ * @throws SQLException if an error occurs when retrieving sql type
+ */
+ int getSQLType(int oid) throws SQLException;
- /**
- * Look up the SQL typecode for a given postgresql type name.
- *
- * @param pgTypeName the server type name to look up
- * @return the SQL type code (a constant from {@link java.sql.Types}) for the type
- * @throws SQLException if an error occurs when retrieving sql type
- */
- int getSQLType(String pgTypeName) throws SQLException;
+ /**
+ * Look up the SQL typecode for a given postgresql type name.
+ *
+ * @param pgTypeName the server type name to look up
+ * @return the SQL type code (a constant from {@link java.sql.Types}) for the type
+ * @throws SQLException if an error occurs when retrieving sql type
+ */
+ int getSQLType(String pgTypeName) throws SQLException;
- int getJavaArrayType(String className) throws SQLException;
+ int getJavaArrayType(String className) throws SQLException;
- /**
- * Look up the oid for a given postgresql type name. This is the inverse of
- * {@link #getPGType(int)}.
- *
- * @param pgTypeName the server type name to look up
- * @return the type's OID, or 0 if unknown
- * @throws SQLException if an error occurs when retrieving PG type
- */
- int getPGType(String pgTypeName) throws SQLException;
+ /**
+ * Look up the oid for a given postgresql type name. This is the inverse of
+ * {@link #getPGType(int)}.
+ *
+ * @param pgTypeName the server type name to look up
+ * @return the type's OID, or 0 if unknown
+ * @throws SQLException if an error occurs when retrieving PG type
+ */
+ int getPGType(String pgTypeName) throws SQLException;
- /**
- * Look up the postgresql type name for a given oid. This is the inverse of
- * {@link #getPGType(String)}.
- *
- * @param oid the type's OID
- * @return the server type name for that OID or null if unknown
- * @throws SQLException if an error occurs when retrieving PG type
- */
- String getPGType(int oid) throws SQLException;
+ /**
+ * Look up the postgresql type name for a given oid. This is the inverse of
+ * {@link #getPGType(String)}.
+ *
+ * @param oid the type's OID
+ * @return the server type name for that OID or null if unknown
+ * @throws SQLException if an error occurs when retrieving PG type
+ */
+ String getPGType(int oid) throws SQLException;
- /**
- * Look up the oid of an array's base type given the array's type oid.
- *
- * @param oid the array type's OID
- * @return the base type's OID, or 0 if unknown
- * @throws SQLException if an error occurs when retrieving array element
- */
- int getPGArrayElement(int oid) throws SQLException;
+ /**
+ * Look up the oid of an array's base type given the array's type oid.
+ *
+ * @param oid the array type's OID
+ * @return the base type's OID, or 0 if unknown
+ * @throws SQLException if an error occurs when retrieving array element
+ */
+ int getPGArrayElement(int oid) throws SQLException;
- /**
- * Determine the oid of the given base postgresql type's array type.
- *
- * @param elementTypeName the base type's
- * @return the array type's OID, or 0 if unknown
- * @throws SQLException if an error occurs when retrieving array type
- */
- int getPGArrayType(String elementTypeName) throws SQLException;
+ /**
+ * Determine the oid of the given base postgresql type's array type.
+ *
+ * @param elementTypeName the base type's
+ * @return the array type's OID, or 0 if unknown
+ * @throws SQLException if an error occurs when retrieving array type
+ */
+ int getPGArrayType(String elementTypeName) throws SQLException;
- /**
- * Determine the delimiter for the elements of the given array type oid.
- *
- * @param oid the array type's OID
- * @return the base type's array type delimiter
- * @throws SQLException if an error occurs when retrieving array delimiter
- */
- char getArrayDelimiter(int oid) throws SQLException;
+ /**
+ * Determine the delimiter for the elements of the given array type oid.
+ *
+ * @param oid the array type's OID
+ * @return the base type's array type delimiter
+ * @throws SQLException if an error occurs when retrieving array delimiter
+ */
+ char getArrayDelimiter(int oid) throws SQLException;
- Iterator<String> getPGTypeNamesWithSQLTypes();
+ Iterator<String> getPGTypeNamesWithSQLTypes();

- Iterator<Integer> getPGTypeOidsWithSQLTypes();
+ Iterator<Integer> getPGTypeOidsWithSQLTypes();

- Class<? extends PGobject> getPGobject(String type);
+ Class<? extends PGobject> getPGobject(String type);
- String getJavaClass(int oid) throws SQLException;
+ String getJavaClass(int oid) throws SQLException;
- String getTypeForAlias(String alias);
+ String getTypeForAlias(String alias);
- int getPrecision(int oid, int typmod);
+ int getPrecision(int oid, int typmod);
- int getScale(int oid, int typmod);
+ int getScale(int oid, int typmod);
- boolean isCaseSensitive(int oid);
+ boolean isCaseSensitive(int oid);
- boolean isSigned(int oid);
+ boolean isSigned(int oid);
- int getDisplaySize(int oid, int typmod);
+ int getDisplaySize(int oid, int typmod);
- int getMaximumPrecision(int oid);
+ int getMaximumPrecision(int oid);
- boolean requiresQuoting(int oid) throws SQLException;
+ boolean requiresQuoting(int oid) throws SQLException;
- /**
- * Returns true if particular sqlType requires quoting.
- * This method is used internally by the driver, so it might disappear without notice.
- *
- * @param sqlType sql type as in java.sql.Types
- * @return true if the type requires quoting
- * @throws SQLException if something goes wrong
- */
- boolean requiresQuotingSqlType(int sqlType) throws SQLException;
+ /**
+ * Returns true if particular sqlType requires quoting.
+ * This method is used internally by the driver, so it might disappear without notice.
+ *
+ * @param sqlType sql type as in java.sql.Types
+ * @return true if the type requires quoting
+ * @throws SQLException if something goes wrong
+ */
+ boolean requiresQuotingSqlType(int sqlType) throws SQLException;
- /**
- * <p>Java Integers are signed 32-bit integers, but oids are unsigned 32-bit integers.
- * We therefore read them as positive long values and then force them into signed integers
- * (wrapping around into negative values when required) or we'd be unable to correctly
- * handle the upper half of the oid space.
- *
- * <p>This function handles the mapping of uint32-values in the long to java integers, and
- * throws for values that are out of range.
- *
- * @param oid the oid as a long.
- * @return the (internal) signed integer representation of the (unsigned) oid.
- * @throws SQLException if the long has a value outside of the range representable by uint32
- */
- int longOidToInt(long oid) throws SQLException;
+ /**
+ * <p>Java Integers are signed 32-bit integers, but oids are unsigned 32-bit integers.
+ * We therefore read them as positive long values and then force them into signed integers
+ * (wrapping around into negative values when required) or we'd be unable to correctly
+ * handle the upper half of the oid space.
+ *
+ * <p>This function handles the mapping of uint32-values in the long to java integers, and
+ * throws for values that are out of range.
+ *
+ * @param oid the oid as a long.
+ * @return the (internal) signed integer representation of the (unsigned) oid.
+ * @throws SQLException if the long has a value outside of the range representable by uint32
+ */
+ int longOidToInt(long oid) throws SQLException;
- /**
- * Java Integers are signed 32-bit integers, but oids are unsigned 32-bit integers.
- * We must therefore first map the (internal) integer representation to a positive long
- * value before sending it to postgresql, or we would be unable to correctly handle the
- * upper half of the oid space because these negative values are disallowed as OID values.
- *
- * @param oid the (signed) integer oid to convert into a long.
- * @return the non-negative value of this oid, stored as a java long.
- */
- long intOidToLong(int oid);
+ /**
+ * Java Integers are signed 32-bit integers, but oids are unsigned 32-bit integers.
+ * We must therefore first map the (internal) integer representation to a positive long
+ * value before sending it to postgresql, or we would be unable to correctly handle the
+ * upper half of the oid space because these negative values are disallowed as OID values.
+ *
+ * @param oid the (signed) integer oid to convert into a long.
+ * @return the non-negative value of this oid, stored as a java long.
+ */
+ long intOidToLong(int oid);
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/Utils.java b/pgjdbc/src/main/java/org/postgresql/core/Utils.java
index d96c6e3..b674949 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/Utils.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/Utils.java
@@ -6,175 +6,174 @@
package org.postgresql.core;
+import java.io.IOException;
+import java.sql.SQLException;
import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
-import java.io.IOException;
-import java.sql.SQLException;
-
/**
* Collection of utilities used by the protocol-level code.
*/
public class Utils {
- public Utils() {
- }
-
- /**
- * Turn a bytearray into a printable form, representing each byte in hex.
- *
- * @param data the bytearray to stringize
- * @return a hex-encoded printable representation of {@code data}
- */
- public static String toHexString(byte[] data) {
- StringBuilder sb = new StringBuilder(data.length * 2);
- for (byte element : data) {
- sb.append(Integer.toHexString((element >> 4) & 15));
- sb.append(Integer.toHexString(element & 15));
+ public Utils() {
}
- return sb.toString();
- }
- /**
- * Escape the given literal {@code value} and append it to the string builder {@code sbuf}. If
- * {@code sbuf} is {@code null}, a new StringBuilder will be returned. The argument
- * {@code standardConformingStrings} defines whether the backend expects standard-conforming
- * string literals or allows backslash escape sequences.
- *
- * @param sbuf the string builder to append to; or {@code null}
- * @param value the string value
- * @param standardConformingStrings if standard conforming strings should be used
- * @return the sbuf argument; or a new string builder for sbuf == null
- * @throws SQLException if the string contains a {@code \0} character
- */
- public static StringBuilder escapeLiteral(StringBuilder sbuf, String value,
- boolean standardConformingStrings) throws SQLException {
- if (sbuf == null) {
- sbuf = new StringBuilder((value.length() + 10) / 10 * 11); // Add 10% for escaping.
- }
- doAppendEscapedLiteral(sbuf, value, standardConformingStrings);
- return sbuf;
- }
-
- /**
- * Common part for {@link #escapeLiteral(StringBuilder, String, boolean)}.
- *
- * @param sbuf Either StringBuffer or StringBuilder as we do not expect any IOException to be
- * thrown
- * @param value value to append
- * @param standardConformingStrings if standard conforming strings should be used
- */
- private static void doAppendEscapedLiteral(Appendable sbuf, String value,
- boolean standardConformingStrings) throws SQLException {
- try {
- if (standardConformingStrings) {
- // With standard_conforming_strings on, escape only single-quotes.
- for (int i = 0; i < value.length(); i++) {
- char ch = value.charAt(i);
- if (ch == '\0') {
- throw new PSQLException(GT.tr("Zero bytes may not occur in string parameters."),
- PSQLState.INVALID_PARAMETER_VALUE);
- }
- if (ch == '\'') {
- sbuf.append('\'');
- }
- sbuf.append(ch);
+ /**
+ * Turn a bytearray into a printable form, representing each byte in hex.
+ *
+ * @param data the bytearray to stringize
+ * @return a hex-encoded printable representation of {@code data}
+ */
+ public static String toHexString(byte[] data) {
+ StringBuilder sb = new StringBuilder(data.length * 2);
+ for (byte element : data) {
+ sb.append(Integer.toHexString((element >> 4) & 15));
+ sb.append(Integer.toHexString(element & 15));
}
- } else {
- // With standard_conforming_string off, escape backslashes and
- // single-quotes, but still escape single-quotes by doubling, to
- // avoid a security hazard if the reported value of
- // standard_conforming_strings is incorrect, or an error if
- // backslash_quote is off.
- for (int i = 0; i < value.length(); i++) {
- char ch = value.charAt(i);
- if (ch == '\0') {
- throw new PSQLException(GT.tr("Zero bytes may not occur in string parameters."),
- PSQLState.INVALID_PARAMETER_VALUE);
- }
- if (ch == '\\' || ch == '\'') {
- sbuf.append(ch);
- }
- sbuf.append(ch);
- }
- }
- } catch (IOException e) {
- throw new PSQLException(GT.tr("No IOException expected from StringBuffer or StringBuilder"),
- PSQLState.UNEXPECTED_ERROR, e);
+ return sb.toString();
}
- }
- /**
- * Escape the given identifier {@code value} and append it to the string builder {@code sbuf}.
- * If {@code sbuf} is {@code null}, a new StringBuilder will be returned. This method is
- * different from appendEscapedLiteral in that it includes the quoting required for the identifier
- * while {@link #escapeLiteral(StringBuilder, String, boolean)} does not.
- *
- * @param sbuf the string builder to append to; or {@code null}
- * @param value the string value
- * @return the sbuf argument; or a new string builder for sbuf == null
- * @throws SQLException if the string contains a {@code \0} character
- */
- public static StringBuilder escapeIdentifier(StringBuilder sbuf, String value)
- throws SQLException {
- if (sbuf == null) {
- sbuf = new StringBuilder(2 + (value.length() + 10) / 10 * 11); // Add 10% for escaping.
- }
- doAppendEscapedIdentifier(sbuf, value);
- return sbuf;
- }
-
- /**
- * Common part for appendEscapedIdentifier.
- *
- * @param sbuf Either StringBuffer or StringBuilder as we do not expect any IOException to be
- * thrown.
- * @param value value to append
- */
- private static void doAppendEscapedIdentifier(Appendable sbuf, String value) throws SQLException {
- try {
- sbuf.append('"');
-
- for (int i = 0; i < value.length(); i++) {
- char ch = value.charAt(i);
- if (ch == '\0') {
- throw new PSQLException(GT.tr("Zero bytes may not occur in identifiers."),
- PSQLState.INVALID_PARAMETER_VALUE);
+ /**
+ * Escape the given literal {@code value} and append it to the string builder {@code sbuf}. If
+ * {@code sbuf} is {@code null}, a new StringBuilder will be returned. The argument
+ * {@code standardConformingStrings} defines whether the backend expects standard-conforming
+ * string literals or allows backslash escape sequences.
+ *
+ * @param sbuf the string builder to append to; or {@code null}
+ * @param value the string value
+ * @param standardConformingStrings if standard conforming strings should be used
+ * @return the sbuf argument; or a new string builder for sbuf == null
+ * @throws SQLException if the string contains a {@code \0} character
+ */
+ public static StringBuilder escapeLiteral(StringBuilder sbuf, String value,
+ boolean standardConformingStrings) throws SQLException {
+ if (sbuf == null) {
+ sbuf = new StringBuilder((value.length() + 10) / 10 * 11); // Add 10% for escaping.
}
- if (ch == '"') {
- sbuf.append(ch);
- }
- sbuf.append(ch);
- }
-
- sbuf.append('"');
- } catch (IOException e) {
- throw new PSQLException(GT.tr("No IOException expected from StringBuffer or StringBuilder"),
- PSQLState.UNEXPECTED_ERROR, e);
+ doAppendEscapedLiteral(sbuf, value, standardConformingStrings);
+ return sbuf;
}
- }
- /**
- * <p>Attempt to parse the server version string into an XXYYZZ form version number.
- *
- * <p>Returns 0 if the version could not be parsed.
- *
- * <p>Returns minor version 0 if the minor version could not be determined, e.g. devel or beta
- * releases.
- *
- * <p>If a single major part like 90400 is passed, it's assumed to be a pre-parsed version and
- * returned verbatim. (Anything equal to or greater than 10000 is presumed to be this form).
- *
- * <p>The yy or zz version parts may be larger than 99. A NumberFormatException is thrown if a
- * version part is out of range.
- *
- * @param serverVersion server version in a XXYYZZ form
- * @return server version in number form
- * @deprecated use specific {@link Version} instance
- */
- @Deprecated
- public static int parseServerVersionStr(String serverVersion) throws NumberFormatException {
- return ServerVersion.parseServerVersionStr(serverVersion);
- }
+ /**
+ * Common part for {@link #escapeLiteral(StringBuilder, String, boolean)}.
+ *
+ * @param sbuf Either StringBuffer or StringBuilder as we do not expect any IOException to be
+ * thrown
+ * @param value value to append
+ * @param standardConformingStrings if standard conforming strings should be used
+ */
+ private static void doAppendEscapedLiteral(Appendable sbuf, String value,
+ boolean standardConformingStrings) throws SQLException {
+ try {
+ if (standardConformingStrings) {
+ // With standard_conforming_strings on, escape only single-quotes.
+ for (int i = 0; i < value.length(); i++) {
+ char ch = value.charAt(i);
+ if (ch == '\0') {
+ throw new PSQLException(GT.tr("Zero bytes may not occur in string parameters."),
+ PSQLState.INVALID_PARAMETER_VALUE);
+ }
+ if (ch == '\'') {
+ sbuf.append('\'');
+ }
+ sbuf.append(ch);
+ }
+ } else {
+ // With standard_conforming_string off, escape backslashes and
+ // single-quotes, but still escape single-quotes by doubling, to
+ // avoid a security hazard if the reported value of
+ // standard_conforming_strings is incorrect, or an error if
+ // backslash_quote is off.
+ for (int i = 0; i < value.length(); i++) {
+ char ch = value.charAt(i);
+ if (ch == '\0') {
+ throw new PSQLException(GT.tr("Zero bytes may not occur in string parameters."),
+ PSQLState.INVALID_PARAMETER_VALUE);
+ }
+ if (ch == '\\' || ch == '\'') {
+ sbuf.append(ch);
+ }
+ sbuf.append(ch);
+ }
+ }
+ } catch (IOException e) {
+ throw new PSQLException(GT.tr("No IOException expected from StringBuffer or StringBuilder"),
+ PSQLState.UNEXPECTED_ERROR, e);
+ }
+ }
+
+ /**
+ * Escape the given identifier {@code value} and append it to the string builder {@code sbuf}.
+ * If {@code sbuf} is {@code null}, a new StringBuilder will be returned. This method is
+ * different from appendEscapedLiteral in that it includes the quoting required for the identifier
+ * while {@link #escapeLiteral(StringBuilder, String, boolean)} does not.
+ *
+ * @param sbuf the string builder to append to; or {@code null}
+ * @param value the string value
+ * @return the sbuf argument; or a new string builder for sbuf == null
+ * @throws SQLException if the string contains a {@code \0} character
+ */
+ public static StringBuilder escapeIdentifier(StringBuilder sbuf, String value)
+ throws SQLException {
+ if (sbuf == null) {
+ sbuf = new StringBuilder(2 + (value.length() + 10) / 10 * 11); // Add 10% for escaping.
+ }
+ doAppendEscapedIdentifier(sbuf, value);
+ return sbuf;
+ }
+
+ /**
+ * Common part for appendEscapedIdentifier.
+ *
+ * @param sbuf Either StringBuffer or StringBuilder as we do not expect any IOException to be
+ * thrown.
+ * @param value value to append
+ */
+ private static void doAppendEscapedIdentifier(Appendable sbuf, String value) throws SQLException {
+ try {
+ sbuf.append('"');
+
+ for (int i = 0; i < value.length(); i++) {
+ char ch = value.charAt(i);
+ if (ch == '\0') {
+ throw new PSQLException(GT.tr("Zero bytes may not occur in identifiers."),
+ PSQLState.INVALID_PARAMETER_VALUE);
+ }
+ if (ch == '"') {
+ sbuf.append(ch);
+ }
+ sbuf.append(ch);
+ }
+
+ sbuf.append('"');
+ } catch (IOException e) {
+ throw new PSQLException(GT.tr("No IOException expected from StringBuffer or StringBuilder"),
+ PSQLState.UNEXPECTED_ERROR, e);
+ }
+ }
+
+ /**
+ * <p>Attempt to parse the server version string into an XXYYZZ form version number.
+ *
+ * <p>Returns 0 if the version could not be parsed.
+ *
+ * <p>Returns minor version 0 if the minor version could not be determined, e.g. devel or beta
+ * releases.
+ *
+ * <p>If a single major part like 90400 is passed, it's assumed to be a pre-parsed version and
+ * returned verbatim. (Anything equal to or greater than 10000 is presumed to be this form).
+ *
+ * <p>The yy or zz version parts may be larger than 99. A NumberFormatException is thrown if a
+ * version part is out of range.
+ *
+ * @param serverVersion server version in a XXYYZZ form
+ * @return server version in number form
+ * @deprecated use specific {@link Version} instance
+ */
+ @Deprecated
+ public static int parseServerVersionStr(String serverVersion) throws NumberFormatException {
+ return ServerVersion.parseServerVersionStr(serverVersion);
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/Version.java b/pgjdbc/src/main/java/org/postgresql/core/Version.java
index 639226a..23a63b4 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/Version.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/Version.java
@@ -7,11 +7,11 @@ package org.postgresql.core;
public interface Version {
- /**
- * Get a machine-readable version number.
- *
- * @return the version in numeric XXYYZZ form, e.g. 90401 for 9.4.1
- */
- int getVersionNum();
+ /**
+ * Get a machine-readable version number.
+ *
+ * @return the version in numeric XXYYZZ form, e.g. 90401 for 9.4.1
+ */
+ int getVersionNum();
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/VisibleBufferedInputStream.java b/pgjdbc/src/main/java/org/postgresql/core/VisibleBufferedInputStream.java
index c78623f..590f064 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/VisibleBufferedInputStream.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/VisibleBufferedInputStream.java
@@ -18,339 +18,338 @@ import java.net.SocketTimeoutException;
*/
public class VisibleBufferedInputStream extends InputStream {
- /**
- * If a direct read to byte array is called that would require a smaller read from the wrapped
- * stream that MINIMUM_READ then first fill the buffer and serve the bytes from there. Larger
- * reads are directly done to the provided byte array.
- */
- private static final int MINIMUM_READ = 1024;
+ /**
+ * If a direct read to byte array is called that would require a smaller read from the wrapped
+ * stream than MINIMUM_READ then first fill the buffer and serve the bytes from there. Larger
+ * reads are directly done to the provided byte array.
+ */
+ private static final int MINIMUM_READ = 1024;
- /**
- * In how large spans is the C string zero-byte scanned.
- */
- private static final int STRING_SCAN_SPAN = 1024;
+ /**
+ * In how large spans is the C string zero-byte scanned.
+ */
+ private static final int STRING_SCAN_SPAN = 1024;
- /**
- * The wrapped input stream.
- */
- private final InputStream wrapped;
+ /**
+ * The wrapped input stream.
+ */
+ private final InputStream wrapped;
- /**
- * The buffer.
- */
- private byte[] buffer;
+ /**
+ * The buffer.
+ */
+ private byte[] buffer;
- /**
- * Current read position in the buffer.
- */
- private int index;
+ /**
+ * Current read position in the buffer.
+ */
+ private int index;
- /**
- * How far is the buffer filled with valid data.
- */
- private int endIndex;
+ /**
+ * How far is the buffer filled with valid data.
+ */
+ private int endIndex;
- /**
- * socket timeout has been requested
- */
- private boolean timeoutRequested;
+ /**
+ * socket timeout has been requested
+ */
+ private boolean timeoutRequested;
- /**
- * Creates a new buffer around the given stream.
- *
- * @param in The stream to buffer.
- * @param bufferSize The initial size of the buffer.
- */
- public VisibleBufferedInputStream(InputStream in, int bufferSize) {
- wrapped = in;
- buffer = new byte[bufferSize < MINIMUM_READ ? MINIMUM_READ : bufferSize];
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public int read() throws IOException {
- if (ensureBytes(1)) {
- return buffer[index++] & 0xFF;
- }
- return -1;
- }
-
- /**
- * Reads a byte from the buffer without advancing the index pointer.
- *
- * @return byte from the buffer without advancing the index pointer
- * @throws IOException if something wrong happens
- */
- public int peek() throws IOException {
- if (ensureBytes(1)) {
- return buffer[index] & 0xFF;
- }
- return -1;
- }
-
- /**
- * Reads byte from the buffer without any checks. This method never reads from the underlaying
- * stream. Before calling this method the {@link #ensureBytes} method must have been called.
- *
- * @return The next byte from the buffer.
- * @throws ArrayIndexOutOfBoundsException If ensureBytes was not called to make sure the buffer
- * contains the byte.
- */
- public byte readRaw() {
- return buffer[index++];
- }
-
- /**
- * Ensures that the buffer contains at least n bytes. This method invalidates the buffer and index
- * fields.
- *
- * @param n The amount of bytes to ensure exists in buffer
- * @return true if required bytes are available and false if EOF
- * @throws IOException If reading of the wrapped stream failed.
- */
- public boolean ensureBytes(int n) throws IOException {
- return ensureBytes(n, true);
- }
-
- /**
- * Ensures that the buffer contains at least n bytes. This method invalidates the buffer and index
- * fields.
- *
- * @param n The amount of bytes to ensure exists in buffer
- * @param block whether or not to block the IO
- * @return true if required bytes are available and false if EOF or the parameter block was false and socket timeout occurred.
- * @throws IOException If reading of the wrapped stream failed.
- */
- public boolean ensureBytes(int n, boolean block) throws IOException {
- int required = n - endIndex + index;
- while (required > 0) {
- if (!readMore(required, block)) {
- return false;
- }
- required = n - endIndex + index;
- }
- return true;
- }
-
- /**
- * Reads more bytes into the buffer.
- *
- * @param wanted How much should be at least read.
- * @return True if at least some bytes were read.
- * @throws IOException If reading of the wrapped stream failed.
- */
- private boolean readMore(int wanted, boolean block) throws IOException {
- if (endIndex == index) {
- index = 0;
- endIndex = 0;
- }
- int canFit = buffer.length - endIndex;
- if (canFit < wanted) {
- // would the wanted bytes fit if we compacted the buffer
- // and still leave some slack
- if (index + canFit > wanted + MINIMUM_READ) {
- compact();
- } else {
- doubleBuffer();
- }
- canFit = buffer.length - endIndex;
- }
- int read = 0;
- try {
- read = wrapped.read(buffer, endIndex, canFit);
- if (!block && read == 0) {
- return false;
- }
- } catch (SocketTimeoutException e) {
- if (!block) {
- return false;
- }
- if (timeoutRequested) {
- throw e;
- }
- }
- if (read < 0) {
- return false;
- }
- endIndex += read;
- return true;
- }
-
- /**
- * Doubles the size of the buffer.
- */
- private void doubleBuffer() {
- byte[] buf = new byte[buffer.length * 2];
- moveBufferTo(buf);
- buffer = buf;
- }
-
- /**
- * Compacts the unread bytes of the buffer to the beginning of the buffer.
- */
- private void compact() {
- moveBufferTo(buffer);
- }
-
- /**
- * Moves bytes from the buffer to the beginning of the destination buffer. Also sets the index and
- * endIndex variables.
- *
- * @param dest The destination buffer.
- */
- private void moveBufferTo(byte[] dest) {
- int size = endIndex - index;
- System.arraycopy(buffer, index, dest, 0, size);
- index = 0;
- endIndex = size;
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public int read(byte[] to, int off, int len) throws IOException {
- if ((off | len | (off + len) | (to.length - (off + len))) < 0) {
- throw new IndexOutOfBoundsException();
- } else if (len == 0) {
- return 0;
+ /**
+ * Creates a new buffer around the given stream.
+ *
+ * @param in The stream to buffer.
+ * @param bufferSize The initial size of the buffer.
+ */
+ public VisibleBufferedInputStream(InputStream in, int bufferSize) {
+ wrapped = in;
+ buffer = new byte[bufferSize < MINIMUM_READ ? MINIMUM_READ : bufferSize];
}
- // if the read would go to wrapped stream, but would result
- // in a small read then try read to the buffer instead
- int avail = endIndex - index;
- if (len - avail < MINIMUM_READ) {
- ensureBytes(len);
- avail = endIndex - index;
- }
-
- // first copy from buffer
- if (avail > 0) {
- if (len <= avail) {
- System.arraycopy(buffer, index, to, off, len);
- index += len;
- return len;
- }
- System.arraycopy(buffer, index, to, off, avail);
- len -= avail;
- off += avail;
- }
- int read = avail;
-
- // good place to reset index because the buffer is fully drained
- index = 0;
- endIndex = 0;
-
- // then directly from wrapped stream
- do {
- int r;
- try {
- r = wrapped.read(to, off, len);
- } catch (SocketTimeoutException e) {
- if (read == 0 && timeoutRequested) {
- throw e;
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public int read() throws IOException {
+ if (ensureBytes(1)) {
+ return buffer[index++] & 0xFF;
}
+ return -1;
+ }
+
+ /**
+ * Reads a byte from the buffer without advancing the index pointer.
+ *
+ * @return byte from the buffer without advancing the index pointer
+ * @throws IOException if something wrong happens
+ */
+ public int peek() throws IOException {
+ if (ensureBytes(1)) {
+ return buffer[index] & 0xFF;
+ }
+ return -1;
+ }
+
+ /**
+ * Reads a byte from the buffer without any checks. This method never reads from the underlying
+ * stream. Before calling this method the {@link #ensureBytes} method must have been called.
+ *
+ * @return The next byte from the buffer.
+ * @throws ArrayIndexOutOfBoundsException If ensureBytes was not called to make sure the buffer
+ * contains the byte.
+ */
+ public byte readRaw() {
+ return buffer[index++];
+ }
+
+ /**
+ * Ensures that the buffer contains at least n bytes. This method invalidates the buffer and index
+ * fields.
+ *
+ * @param n The amount of bytes to ensure exists in buffer
+ * @return true if required bytes are available and false if EOF
+ * @throws IOException If reading of the wrapped stream failed.
+ */
+ public boolean ensureBytes(int n) throws IOException {
+ return ensureBytes(n, true);
+ }
+
+ /**
+ * Ensures that the buffer contains at least n bytes. This method invalidates the buffer and index
+ * fields.
+ *
+ * @param n The amount of bytes to ensure exists in buffer
+ * @param block whether or not to block the IO
+ * @return true if required bytes are available and false if EOF or the parameter block was false and socket timeout occurred.
+ * @throws IOException If reading of the wrapped stream failed.
+ */
+ public boolean ensureBytes(int n, boolean block) throws IOException {
+ int required = n - endIndex + index;
+ while (required > 0) {
+ if (!readMore(required, block)) {
+ return false;
+ }
+ required = n - endIndex + index;
+ }
+ return true;
+ }
+
+ /**
+ * Reads more bytes into the buffer.
+ *
+ * @param wanted How much should be at least read.
+ * @return True if at least some bytes were read.
+ * @throws IOException If reading of the wrapped stream failed.
+ */
+ private boolean readMore(int wanted, boolean block) throws IOException {
+ if (endIndex == index) {
+ index = 0;
+ endIndex = 0;
+ }
+ int canFit = buffer.length - endIndex;
+ if (canFit < wanted) {
+ // would the wanted bytes fit if we compacted the buffer
+ // and still leave some slack
+ if (index + canFit > wanted + MINIMUM_READ) {
+ compact();
+ } else {
+ doubleBuffer();
+ }
+ canFit = buffer.length - endIndex;
+ }
+ int read = 0;
+ try {
+ read = wrapped.read(buffer, endIndex, canFit);
+ if (!block && read == 0) {
+ return false;
+ }
+ } catch (SocketTimeoutException e) {
+ if (!block) {
+ return false;
+ }
+ if (timeoutRequested) {
+ throw e;
+ }
+ }
+ if (read < 0) {
+ return false;
+ }
+ endIndex += read;
+ return true;
+ }
+
+ /**
+ * Doubles the size of the buffer.
+ */
+ private void doubleBuffer() {
+ byte[] buf = new byte[buffer.length * 2];
+ moveBufferTo(buf);
+ buffer = buf;
+ }
+
+ /**
+ * Compacts the unread bytes of the buffer to the beginning of the buffer.
+ */
+ private void compact() {
+ moveBufferTo(buffer);
+ }
+
+ /**
+ * Moves bytes from the buffer to the beginning of the destination buffer. Also sets the index and
+ * endIndex variables.
+ *
+ * @param dest The destination buffer.
+ */
+ private void moveBufferTo(byte[] dest) {
+ int size = endIndex - index;
+ System.arraycopy(buffer, index, dest, 0, size);
+ index = 0;
+ endIndex = size;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public int read(byte[] to, int off, int len) throws IOException {
+ if ((off | len | (off + len) | (to.length - (off + len))) < 0) {
+ throw new IndexOutOfBoundsException();
+ } else if (len == 0) {
+ return 0;
+ }
+
+ // if the read would go to wrapped stream, but would result
+ // in a small read then try read to the buffer instead
+ int avail = endIndex - index;
+ if (len - avail < MINIMUM_READ) {
+ ensureBytes(len);
+ avail = endIndex - index;
+ }
+
+ // first copy from buffer
+ if (avail > 0) {
+ if (len <= avail) {
+ System.arraycopy(buffer, index, to, off, len);
+ index += len;
+ return len;
+ }
+ System.arraycopy(buffer, index, to, off, avail);
+ len -= avail;
+ off += avail;
+ }
+ int read = avail;
+
+ // good place to reset index because the buffer is fully drained
+ index = 0;
+ endIndex = 0;
+
+ // then directly from wrapped stream
+ do {
+ int r;
+ try {
+ r = wrapped.read(to, off, len);
+ } catch (SocketTimeoutException e) {
+ if (read == 0 && timeoutRequested) {
+ throw e;
+ }
+ return read;
+ }
+ if (r <= 0) {
+ return read == 0 ? r : read;
+ }
+ read += r;
+ off += r;
+ len -= r;
+ } while (len > 0);
+
return read;
- }
- if (r <= 0) {
- return read == 0 ? r : read;
- }
- read += r;
- off += r;
- len -= r;
- } while (len > 0);
-
- return read;
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public long skip(long n) throws IOException {
- int avail = endIndex - index;
- if (n >= Integer.MAX_VALUE) {
- throw new IllegalArgumentException("n is too large");
}
- if (avail >= n) {
- index = index + (int)n;
- return n;
- }
- n -= avail;
- index = 0;
- endIndex = 0;
- return avail + wrapped.skip(n);
- }
- /**
- * {@inheritDoc}
- */
- @Override
- public int available() throws IOException {
- int avail = endIndex - index;
- return avail > 0 ? avail : wrapped.available();
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void close() throws IOException {
- wrapped.close();
- }
-
- /**
- * Returns direct handle to the used buffer. Use the {@link #ensureBytes} to prefill required
- * bytes the buffer and {@link #getIndex} to fetch the current position of the buffer.
- *
- * @return The underlaying buffer.
- */
- public byte[] getBuffer() {
- return buffer;
- }
-
- /**
- * Returns the current read position in the buffer.
- *
- * @return the current read position in the buffer.
- */
- public int getIndex() {
- return index;
- }
-
- /**
- * Scans the length of the next null terminated string (C-style string) from the stream.
- *
- * @return The length of the next null terminated string.
- * @throws IOException If reading of stream fails.
- * @throws EOFException If the stream did not contain any null terminators.
- */
- public int scanCStringLength() throws IOException {
- int pos = index;
- while (true) {
- while (pos < endIndex) {
- if (buffer[pos++] == '\0') {
- return pos - index;
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public long skip(long n) throws IOException {
+ int avail = endIndex - index;
+ if (n >= Integer.MAX_VALUE) {
+ throw new IllegalArgumentException("n is too large");
}
- }
- if (!readMore(STRING_SCAN_SPAN, true)) {
- throw new EOFException();
- }
- pos = index;
+ if (avail >= n) {
+ index = index + (int) n;
+ return n;
+ }
+ n -= avail;
+ index = 0;
+ endIndex = 0;
+ return avail + wrapped.skip(n);
}
- }
- public void setTimeoutRequested(boolean timeoutRequested) {
- this.timeoutRequested = timeoutRequested;
- }
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public int available() throws IOException {
+ int avail = endIndex - index;
+ return avail > 0 ? avail : wrapped.available();
+ }
- /**
- *
- * @return the wrapped stream
- */
- public InputStream getWrapped() {
- return wrapped;
- }
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void close() throws IOException {
+ wrapped.close();
+ }
+
+ /**
+ * Returns direct handle to the used buffer. Use the {@link #ensureBytes} to prefill required
+ * bytes in the buffer and {@link #getIndex} to fetch the current position of the buffer.
+ *
+ * @return The underlying buffer.
+ */
+ public byte[] getBuffer() {
+ return buffer;
+ }
+
+ /**
+ * Returns the current read position in the buffer.
+ *
+ * @return the current read position in the buffer.
+ */
+ public int getIndex() {
+ return index;
+ }
+
+ /**
+ * Scans the length of the next null terminated string (C-style string) from the stream.
+ *
+ * @return The length of the next null terminated string.
+ * @throws IOException If reading of stream fails.
+ * @throws EOFException If the stream did not contain any null terminators.
+ */
+ public int scanCStringLength() throws IOException {
+ int pos = index;
+ while (true) {
+ while (pos < endIndex) {
+ if (buffer[pos++] == '\0') {
+ return pos - index;
+ }
+ }
+ if (!readMore(STRING_SCAN_SPAN, true)) {
+ throw new EOFException();
+ }
+ pos = index;
+ }
+ }
+
+ public void setTimeoutRequested(boolean timeoutRequested) {
+ this.timeoutRequested = timeoutRequested;
+ }
+
+ /**
+ * @return the wrapped stream
+ */
+ public InputStream getWrapped() {
+ return wrapped;
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/AuthenticationPluginManager.java b/pgjdbc/src/main/java/org/postgresql/core/v3/AuthenticationPluginManager.java
index f6cf40a..01420b4 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/AuthenticationPluginManager.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/AuthenticationPluginManager.java
@@ -5,14 +5,6 @@
package org.postgresql.core.v3;
-import org.postgresql.PGProperty;
-import org.postgresql.plugin.AuthenticationPlugin;
-import org.postgresql.plugin.AuthenticationRequestType;
-import org.postgresql.util.GT;
-import org.postgresql.util.ObjectFactory;
-import org.postgresql.util.PSQLException;
-import org.postgresql.util.PSQLState;
-
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.CharBuffer;
@@ -21,105 +13,112 @@ import java.util.Arrays;
import java.util.Properties;
import java.util.logging.Level;
import java.util.logging.Logger;
+import org.postgresql.PGProperty;
+import org.postgresql.plugin.AuthenticationPlugin;
+import org.postgresql.plugin.AuthenticationRequestType;
+import org.postgresql.util.GT;
+import org.postgresql.util.ObjectFactory;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
class AuthenticationPluginManager {
- private static final Logger LOGGER = Logger.getLogger(AuthenticationPluginManager.class.getName());
+ private static final Logger LOGGER = Logger.getLogger(AuthenticationPluginManager.class.getName());
- @FunctionalInterface
- public interface PasswordAction {
- R apply(T password) throws PSQLException, IOException;
- }
-
- private AuthenticationPluginManager() {
- }
-
- /**
- * If a password is requested by the server during connection initiation, this
- * method will be invoked to supply the password. This method will only be
- * invoked if the server actually requests a password, e.g. trust authentication
- * will skip it entirely.
- *
- *
The caller provides a action method that will be invoked with the {@code char[]}
- * password. After completion, for security reasons the {@code char[]} array will be
- * wiped by filling it with zeroes. Callers must not rely on being able to read
- * the password {@code char[]} after the action has completed.
- *
- * @param type The authentication type that is being requested
- * @param info The connection properties for the connection
- * @param action The action to invoke with the password
- * @throws PSQLException Throws a PSQLException if the plugin class cannot be instantiated
- * @throws IOException Bubbles up any thrown IOException from the provided action
- */
- public static T withPassword(AuthenticationRequestType type, Properties info,
- PasswordAction action) throws PSQLException, IOException {
- char[] password = null;
-
- String authPluginClassName = PGProperty.AUTHENTICATION_PLUGIN_CLASS_NAME.getOrDefault(info);
-
- if (authPluginClassName == null || "".equals(authPluginClassName)) {
- // Default auth plugin simply pulls password directly from connection properties
- String passwordText = PGProperty.PASSWORD.getOrDefault(info);
- if (passwordText != null) {
- password = passwordText.toCharArray();
- }
- } else {
- AuthenticationPlugin authPlugin;
- try {
- authPlugin = ObjectFactory.instantiate(AuthenticationPlugin.class, authPluginClassName, info,
- false, null);
- } catch (Exception ex) {
- String msg = GT.tr("Unable to load Authentication Plugin {0}", authPluginClassName);
- LOGGER.log(Level.FINE, msg, ex);
- throw new PSQLException(msg, PSQLState.INVALID_PARAMETER_VALUE, ex);
- }
-
- password = authPlugin.getPassword(type);
+ private AuthenticationPluginManager() {
}
- try {
- return action.apply(password);
- } finally {
- if (password != null) {
- Arrays.fill(password, (char) 0);
- }
+ /**
+ * If a password is requested by the server during connection initiation, this
+ * method will be invoked to supply the password. This method will only be
+ * invoked if the server actually requests a password, e.g. trust authentication
+ * will skip it entirely.
+ *
+ *
The caller provides a action method that will be invoked with the {@code char[]}
+ * password. After completion, for security reasons the {@code char[]} array will be
+ * wiped by filling it with zeroes. Callers must not rely on being able to read
+ * the password {@code char[]} after the action has completed.
+ *
+ * @param type The authentication type that is being requested
+ * @param info The connection properties for the connection
+ * @param action The action to invoke with the password
+ * @throws PSQLException Throws a PSQLException if the plugin class cannot be instantiated
+ * @throws IOException Bubbles up any thrown IOException from the provided action
+ */
+ public static T withPassword(AuthenticationRequestType type, Properties info,
+ PasswordAction action) throws PSQLException, IOException {
+ char[] password = null;
+
+ String authPluginClassName = PGProperty.AUTHENTICATION_PLUGIN_CLASS_NAME.getOrDefault(info);
+
+ if (authPluginClassName == null || "".equals(authPluginClassName)) {
+ // Default auth plugin simply pulls password directly from connection properties
+ String passwordText = PGProperty.PASSWORD.getOrDefault(info);
+ if (passwordText != null) {
+ password = passwordText.toCharArray();
+ }
+ } else {
+ AuthenticationPlugin authPlugin;
+ try {
+ authPlugin = ObjectFactory.instantiate(AuthenticationPlugin.class, authPluginClassName, info,
+ false, null);
+ } catch (Exception ex) {
+ String msg = GT.tr("Unable to load Authentication Plugin {0}", authPluginClassName);
+ LOGGER.log(Level.FINE, msg, ex);
+ throw new PSQLException(msg, PSQLState.INVALID_PARAMETER_VALUE, ex);
+ }
+
+ password = authPlugin.getPassword(type);
+ }
+
+ try {
+ return action.apply(password);
+ } finally {
+ if (password != null) {
+ Arrays.fill(password, (char) 0);
+ }
+ }
}
- }
- /**
- * Helper that wraps {@link #withPassword(AuthenticationRequestType, Properties, PasswordAction)}, checks that it is not-null, and encodes
- * it as a byte array. Used by internal code paths that require an encoded password
- * that may be an empty string, but not null.
- *
- *
The caller provides a callback method that will be invoked with the {@code byte[]}
- * encoded password. After completion, for security reasons the {@code byte[]} array will be
- * wiped by filling it with zeroes. Callers must not rely on being able to read
- * the password {@code byte[]} after the callback has completed.
+ /**
+ * Helper that wraps {@link #withPassword(AuthenticationRequestType, Properties, PasswordAction)}, checks that it is non-null, and encodes
+ * it as a byte array. Used by internal code paths that require an encoded password
+ * that may be an empty string, but not null.
+ *
+ *
The caller provides a callback method that will be invoked with the {@code byte[]}
+ * encoded password. After completion, for security reasons the {@code byte[]} array will be
+ * wiped by filling it with zeroes. Callers must not rely on being able to read
+ * the password {@code byte[]} after the callback has completed.
+ *
+ * @param type The authentication type that is being requested
+ * @param info The connection properties for the connection
+ * @param action The action to invoke with the encoded password
+ * @throws PSQLException Throws a PSQLException if the plugin class cannot be instantiated or if the retrieved password is null.
+ * @throws IOException Bubbles up any thrown IOException from the provided callback
+ */
+ public static T withEncodedPassword(AuthenticationRequestType type, Properties info,
+ PasswordAction action) throws PSQLException, IOException {
+ byte[] encodedPassword = withPassword(type, info, password -> {
+ if (password == null) {
+ throw new PSQLException(
+ GT.tr("The server requested password-based authentication, but no password was provided by plugin {0}",
+ PGProperty.AUTHENTICATION_PLUGIN_CLASS_NAME.getOrDefault(info)),
+ PSQLState.CONNECTION_REJECTED);
+ }
+ ByteBuffer buf = StandardCharsets.UTF_8.encode(CharBuffer.wrap(password));
+ byte[] bytes = new byte[buf.limit()];
+ buf.get(bytes);
+ return bytes;
+ });
- * @param type The authentication type that is being requested
- * @param info The connection properties for the connection
- * @param action The action to invoke with the encoded password
- * @throws PSQLException Throws a PSQLException if the plugin class cannot be instantiated or if the retrieved password is null.
- * @throws IOException Bubbles up any thrown IOException from the provided callback
- */
- public static T withEncodedPassword(AuthenticationRequestType type, Properties info,
- PasswordAction action) throws PSQLException, IOException {
- byte[] encodedPassword = withPassword(type, info, password -> {
- if (password == null) {
- throw new PSQLException(
- GT.tr("The server requested password-based authentication, but no password was provided by plugin {0}",
- PGProperty.AUTHENTICATION_PLUGIN_CLASS_NAME.getOrDefault(info)),
- PSQLState.CONNECTION_REJECTED);
- }
- ByteBuffer buf = StandardCharsets.UTF_8.encode(CharBuffer.wrap(password));
- byte[] bytes = new byte[buf.limit()];
- buf.get(bytes);
- return bytes;
- });
-
- try {
- return action.apply(encodedPassword);
- } finally {
- Arrays.fill(encodedPassword, (byte) 0);
+ try {
+ return action.apply(encodedPassword);
+ } finally {
+ Arrays.fill(encodedPassword, (byte) 0);
+ }
+ }
+
+ @FunctionalInterface
+ public interface PasswordAction {
+ R apply(T password) throws PSQLException, IOException;
}
- }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/BatchedQuery.java b/pgjdbc/src/main/java/org/postgresql/core/v3/BatchedQuery.java
index ed57f75..7f26f2f 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/BatchedQuery.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/BatchedQuery.java
@@ -16,167 +16,166 @@ import org.postgresql.core.ParameterList;
*
* @author Jeremy Whiting jwhiting@redhat.com
* @author Christopher Deckers (chrriis@gmail.com)
- *
*/
public class BatchedQuery extends SimpleQuery {
- private String sql;
- private final int valuesBraceOpenPosition;
- private final int valuesBraceClosePosition;
- private final int batchSize;
- private BatchedQuery [] blocks;
+ private final int valuesBraceOpenPosition;
+ private final int valuesBraceClosePosition;
+ private final int batchSize;
+ private String sql;
+ private BatchedQuery[] blocks;
- public BatchedQuery(NativeQuery query, TypeTransferModeRegistry transferModeRegistry,
- int valuesBraceOpenPosition,
- int valuesBraceClosePosition, boolean sanitiserDisabled) {
- super(query, transferModeRegistry, sanitiserDisabled);
- this.valuesBraceOpenPosition = valuesBraceOpenPosition;
- this.valuesBraceClosePosition = valuesBraceClosePosition;
- this.batchSize = 1;
- }
+ public BatchedQuery(NativeQuery query, TypeTransferModeRegistry transferModeRegistry,
+ int valuesBraceOpenPosition,
+ int valuesBraceClosePosition, boolean sanitiserDisabled) {
+ super(query, transferModeRegistry, sanitiserDisabled);
+ this.valuesBraceOpenPosition = valuesBraceOpenPosition;
+ this.valuesBraceClosePosition = valuesBraceClosePosition;
+ this.batchSize = 1;
+ }
- private BatchedQuery(BatchedQuery src, int batchSize) {
- super(src);
- this.valuesBraceOpenPosition = src.valuesBraceOpenPosition;
- this.valuesBraceClosePosition = src.valuesBraceClosePosition;
- this.batchSize = batchSize;
- }
+ private BatchedQuery(BatchedQuery src, int batchSize) {
+ super(src);
+ this.valuesBraceOpenPosition = src.valuesBraceOpenPosition;
+ this.valuesBraceClosePosition = src.valuesBraceClosePosition;
+ this.batchSize = batchSize;
+ }
- public BatchedQuery deriveForMultiBatch(int valueBlock) {
- if (getBatchSize() != 1) {
- throw new IllegalStateException("Only the original decorator can be derived.");
- }
- if (valueBlock == 1) {
- return this;
- }
- int index = Integer.numberOfTrailingZeros(valueBlock) - 1;
- if (valueBlock > 128 || valueBlock != (1 << (index + 1))) {
- throw new IllegalArgumentException(
- "Expected value block should be a power of 2 smaller or equal to 128. Actual block is "
- + valueBlock);
- }
- if (blocks == null) {
- blocks = new BatchedQuery[7];
- }
- BatchedQuery bq = blocks[index];
- if (bq == null) {
- bq = new BatchedQuery(this, valueBlock);
- blocks[index] = bq;
- }
- return bq;
- }
-
- @Override
- public int getBatchSize() {
- return batchSize;
- }
-
- /**
- * Method to return the sql based on number of batches. Skipping the initial
- * batch.
- */
- @Override
- public String getNativeSql() {
- if (sql != null) {
- return sql;
- }
- sql = buildNativeSql(null);
- return sql;
- }
-
- private String buildNativeSql(ParameterList params) {
- String sql = null;
- // dynamically build sql with parameters for batches
- String nativeSql = super.getNativeSql();
- int batchSize = getBatchSize();
- if (batchSize < 2) {
- sql = nativeSql;
- return sql;
- }
- if (nativeSql == null) {
- sql = "";
- return sql;
- }
- int valuesBlockCharCount = 0;
- // Split the values section around every dynamic parameter.
- int[] bindPositions = getNativeQuery().bindPositions;
- int[] chunkStart = new int[1 + bindPositions.length];
- int[] chunkEnd = new int[1 + bindPositions.length];
- chunkStart[0] = valuesBraceOpenPosition;
- if (bindPositions.length == 0) {
- valuesBlockCharCount = valuesBraceClosePosition - valuesBraceOpenPosition + 1;
- chunkEnd[0] = valuesBraceClosePosition + 1;
- } else {
- chunkEnd[0] = bindPositions[0];
- // valuesBlockCharCount += chunks[0].length;
- valuesBlockCharCount += chunkEnd[0] - chunkStart[0];
- for (int i = 0; i < bindPositions.length; i++) {
- int startIndex = bindPositions[i] + 2;
- int endIndex =
- i < bindPositions.length - 1 ? bindPositions[i + 1] : valuesBraceClosePosition + 1;
- for (; startIndex < endIndex; startIndex++) {
- if (!Character.isDigit(nativeSql.charAt(startIndex))) {
- break;
- }
+ public BatchedQuery deriveForMultiBatch(int valueBlock) {
+ if (getBatchSize() != 1) {
+ throw new IllegalStateException("Only the original decorator can be derived.");
}
- chunkStart[i + 1] = startIndex;
- chunkEnd[i + 1] = endIndex;
- // valuesBlockCharCount += chunks[i + 1].length;
- valuesBlockCharCount += chunkEnd[i + 1] - chunkStart[i + 1];
- }
+ if (valueBlock == 1) {
+ return this;
+ }
+ int index = Integer.numberOfTrailingZeros(valueBlock) - 1;
+ if (valueBlock > 128 || valueBlock != (1 << (index + 1))) {
+ throw new IllegalArgumentException(
+ "Expected value block should be a power of 2 smaller or equal to 128. Actual block is "
+ + valueBlock);
+ }
+ if (blocks == null) {
+ blocks = new BatchedQuery[7];
+ }
+ BatchedQuery bq = blocks[index];
+ if (bq == null) {
+ bq = new BatchedQuery(this, valueBlock);
+ blocks[index] = bq;
+ }
+ return bq;
}
- int length = nativeSql.length();
- //valuesBraceOpenPosition + valuesBlockCharCount;
- length += NativeQuery.calculateBindLength(bindPositions.length * batchSize);
- length -= NativeQuery.calculateBindLength(bindPositions.length);
- length += (valuesBlockCharCount + 1 /*comma*/) * (batchSize - 1 /* initial sql */);
- StringBuilder s = new StringBuilder(length);
- // Add query until end of values parameter block.
- int pos;
- if (bindPositions.length > 0 && params == null) {
- // Add the first values (...) clause, it would be values($1,..., $n), and it matches with
- // the values clause of a simple non-rewritten SQL
- s.append(nativeSql, 0, valuesBraceClosePosition + 1);
- pos = bindPositions.length + 1;
- } else {
- pos = 1;
- batchSize++; // do not use super.toString(params) as it does not work if query ends with --
- // We need to carefully add (...),(...), and we do not want to get (...) --, (...)
- // s.append(super.toString(params));
- s.append(nativeSql, 0, valuesBraceOpenPosition);
+ @Override
+ public int getBatchSize() {
+ return batchSize;
}
- for (int i = 2; i <= batchSize; i++) {
- if (i > 2 || pos != 1) {
- // For "has binds" the first valuds
- s.append(',');
- }
- s.append(nativeSql, chunkStart[0], chunkEnd[0]);
- for (int j = 1; j < chunkStart.length; j++) {
- if (params == null) {
- NativeQuery.appendBindName(s, pos++);
+
+ /**
+ * Method to return the sql based on number of batches. Skipping the initial
+ * batch.
+ */
+ @Override
+ public String getNativeSql() {
+ if (sql != null) {
+ return sql;
+ }
+ sql = buildNativeSql(null);
+ return sql;
+ }
+
+ private String buildNativeSql(ParameterList params) {
+ String sql = null;
+ // dynamically build sql with parameters for batches
+ String nativeSql = super.getNativeSql();
+ int batchSize = getBatchSize();
+ if (batchSize < 2) {
+ sql = nativeSql;
+ return sql;
+ }
+ if (nativeSql == null) {
+ sql = "";
+ return sql;
+ }
+ int valuesBlockCharCount = 0;
+ // Split the values section around every dynamic parameter.
+ int[] bindPositions = getNativeQuery().bindPositions;
+ int[] chunkStart = new int[1 + bindPositions.length];
+ int[] chunkEnd = new int[1 + bindPositions.length];
+ chunkStart[0] = valuesBraceOpenPosition;
+ if (bindPositions.length == 0) {
+ valuesBlockCharCount = valuesBraceClosePosition - valuesBraceOpenPosition + 1;
+ chunkEnd[0] = valuesBraceClosePosition + 1;
} else {
- s.append(params.toString(pos++, true));
+ chunkEnd[0] = bindPositions[0];
+ // valuesBlockCharCount += chunks[0].length;
+ valuesBlockCharCount += chunkEnd[0] - chunkStart[0];
+ for (int i = 0; i < bindPositions.length; i++) {
+ int startIndex = bindPositions[i] + 2;
+ int endIndex =
+ i < bindPositions.length - 1 ? bindPositions[i + 1] : valuesBraceClosePosition + 1;
+ for (; startIndex < endIndex; startIndex++) {
+ if (!Character.isDigit(nativeSql.charAt(startIndex))) {
+ break;
+ }
+ }
+ chunkStart[i + 1] = startIndex;
+ chunkEnd[i + 1] = endIndex;
+ // valuesBlockCharCount += chunks[i + 1].length;
+ valuesBlockCharCount += chunkEnd[i + 1] - chunkStart[i + 1];
+ }
}
- s.append(nativeSql, chunkStart[j], chunkEnd[j]);
- }
- }
- // Add trailing content: final query is like original with multi values.
- // This could contain "--" comments, so it is important to add them at end.
- s.append(nativeSql, valuesBraceClosePosition + 1, nativeSql.length());
- sql = s.toString();
- // Predict length only when building sql with $1, $2, ... (that is no specific params given)
- assert params != null || s.length() == length
- : "Predicted length != actual: " + length + " !=" + s.length();
- return sql;
- }
+ int length = nativeSql.length();
+ //valuesBraceOpenPosition + valuesBlockCharCount;
+ length += NativeQuery.calculateBindLength(bindPositions.length * batchSize);
+ length -= NativeQuery.calculateBindLength(bindPositions.length);
+ length += (valuesBlockCharCount + 1 /*comma*/) * (batchSize - 1 /* initial sql */);
- @Override
- public String toString(ParameterList params) {
- if (getBatchSize() < 2) {
- return super.toString(params);
+ StringBuilder s = new StringBuilder(length);
+ // Add query until end of values parameter block.
+ int pos;
+ if (bindPositions.length > 0 && params == null) {
+ // Add the first values (...) clause, it would be values($1,..., $n), and it matches with
+ // the values clause of a simple non-rewritten SQL
+ s.append(nativeSql, 0, valuesBraceClosePosition + 1);
+ pos = bindPositions.length + 1;
+ } else {
+ pos = 1;
+ batchSize++; // do not use super.toString(params) as it does not work if query ends with --
+ // We need to carefully add (...),(...), and we do not want to get (...) --, (...)
+ // s.append(super.toString(params));
+ s.append(nativeSql, 0, valuesBraceOpenPosition);
+ }
+ for (int i = 2; i <= batchSize; i++) {
+ if (i > 2 || pos != 1) {
+        // For "has binds" the first values
+ s.append(',');
+ }
+ s.append(nativeSql, chunkStart[0], chunkEnd[0]);
+ for (int j = 1; j < chunkStart.length; j++) {
+ if (params == null) {
+ NativeQuery.appendBindName(s, pos++);
+ } else {
+ s.append(params.toString(pos++, true));
+ }
+ s.append(nativeSql, chunkStart[j], chunkEnd[j]);
+ }
+ }
+ // Add trailing content: final query is like original with multi values.
+ // This could contain "--" comments, so it is important to add them at end.
+ s.append(nativeSql, valuesBraceClosePosition + 1, nativeSql.length());
+ sql = s.toString();
+ // Predict length only when building sql with $1, $2, ... (that is no specific params given)
+ assert params != null || s.length() == length
+ : "Predicted length != actual: " + length + " !=" + s.length();
+ return sql;
+ }
+
+ @Override
+ public String toString(ParameterList params) {
+ if (getBatchSize() < 2) {
+ return super.toString(params);
+ }
+ return buildNativeSql(params);
}
- return buildNativeSql(params);
- }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/CompositeParameterList.java b/pgjdbc/src/main/java/org/postgresql/core/v3/CompositeParameterList.java
index 8075834..2193d2b 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/CompositeParameterList.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/CompositeParameterList.java
@@ -6,15 +6,14 @@
package org.postgresql.core.v3;
+import java.io.InputStream;
+import java.sql.SQLException;
import org.postgresql.core.ParameterList;
import org.postgresql.util.ByteStreamWriter;
import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
-import java.io.InputStream;
-import java.sql.SQLException;
-
/**
* Parameter list for V3 query strings that contain multiple statements. We delegate to one
* SimpleParameterList per statement, and translate parameter indexes as needed.
@@ -22,194 +21,194 @@ import java.sql.SQLException;
* @author Oliver Jowett (oliver@opencloud.com)
*/
class CompositeParameterList implements V3ParameterList {
- CompositeParameterList(SimpleParameterList[] subparams, int[] offsets) {
- this.subparams = subparams;
- this.offsets = offsets;
- this.total = offsets[offsets.length - 1] + subparams[offsets.length - 1].getInParameterCount();
- }
+ private final int total;
+ private final SimpleParameterList[] subparams;
+ private final int[] offsets;
- private int findSubParam(int index) throws SQLException {
- if (index < 1 || index > total) {
- throw new PSQLException(
- GT.tr("The column index is out of range: {0}, number of columns: {1}.", index, total),
- PSQLState.INVALID_PARAMETER_VALUE);
+ CompositeParameterList(SimpleParameterList[] subparams, int[] offsets) {
+ this.subparams = subparams;
+ this.offsets = offsets;
+ this.total = offsets[offsets.length - 1] + subparams[offsets.length - 1].getInParameterCount();
}
- for (int i = offsets.length - 1; i >= 0; i--) {
- if (offsets[i] < index) {
- return i;
- }
+ private int findSubParam(int index) throws SQLException {
+ if (index < 1 || index > total) {
+ throw new PSQLException(
+ GT.tr("The column index is out of range: {0}, number of columns: {1}.", index, total),
+ PSQLState.INVALID_PARAMETER_VALUE);
+ }
+
+ for (int i = offsets.length - 1; i >= 0; i--) {
+ if (offsets[i] < index) {
+ return i;
+ }
+ }
+
+ throw new IllegalArgumentException("I am confused; can't find a subparam for index " + index);
}
- throw new IllegalArgumentException("I am confused; can't find a subparam for index " + index);
- }
+ @Override
+ public void registerOutParameter(int index, int sqlType) {
- @Override
- public void registerOutParameter(int index, int sqlType) {
-
- }
-
- public int getDirection(int i) {
- return 0;
- }
-
- @Override
- public int getParameterCount() {
- return total;
- }
-
- @Override
- public int getInParameterCount() {
- return total;
- }
-
- @Override
- public int getOutParameterCount() {
- return 0;
- }
-
- @Override
- public int[] getTypeOIDs() {
- int[] oids = new int[total];
- for (int i = 0; i < offsets.length; i++) {
- int[] subOids = subparams[i].getTypeOIDs();
- System.arraycopy(subOids, 0, oids, offsets[i], subOids.length);
- }
- return oids;
- }
-
- @Override
- public void setIntParameter(int index, int value) throws SQLException {
- int sub = findSubParam(index);
- subparams[sub].setIntParameter(index - offsets[sub], value);
- }
-
- @Override
- public void setLiteralParameter(int index, String value, int oid) throws SQLException {
- int sub = findSubParam(index);
- subparams[sub].setStringParameter(index - offsets[sub], value, oid);
- }
-
- @Override
- public void setStringParameter(int index, String value, int oid) throws SQLException {
- int sub = findSubParam(index);
- subparams[sub].setStringParameter(index - offsets[sub], value, oid);
- }
-
- @Override
- public void setBinaryParameter(int index, byte[] value, int oid) throws SQLException {
- int sub = findSubParam(index);
- subparams[sub].setBinaryParameter(index - offsets[sub], value, oid);
- }
-
- @Override
- public void setBytea(int index, byte[] data, int offset, int length) throws SQLException {
- int sub = findSubParam(index);
- subparams[sub].setBytea(index - offsets[sub], data, offset, length);
- }
-
- @Override
- public void setBytea(int index, InputStream stream, int length) throws SQLException {
- int sub = findSubParam(index);
- subparams[sub].setBytea(index - offsets[sub], stream, length);
- }
-
- @Override
- public void setBytea(int index, InputStream stream) throws SQLException {
- int sub = findSubParam(index);
- subparams[sub].setBytea(index - offsets[sub], stream);
- }
-
- @Override
- public void setBytea(int index, ByteStreamWriter writer) throws SQLException {
- int sub = findSubParam(index);
- subparams[sub].setBytea(index - offsets[sub], writer);
- }
-
- @Override
- public void setText(int index, InputStream stream) throws SQLException {
- int sub = findSubParam(index);
- subparams[sub].setText(index - offsets[sub], stream);
- }
-
- @Override
- public void setNull(int index, int oid) throws SQLException {
- int sub = findSubParam(index);
- subparams[sub].setNull(index - offsets[sub], oid);
- }
-
- @Override
- public String toString(int index, boolean standardConformingStrings) {
- try {
- int sub = findSubParam(index);
- return subparams[sub].toString(index - offsets[sub], standardConformingStrings);
- } catch (SQLException e) {
- throw new IllegalStateException(e.getMessage());
- }
- }
-
- @Override
- public ParameterList copy() {
- SimpleParameterList[] copySub = new SimpleParameterList[subparams.length];
- for (int sub = 0; sub < subparams.length; sub++) {
- copySub[sub] = (SimpleParameterList) subparams[sub].copy();
}
- return new CompositeParameterList(copySub, offsets);
- }
-
- @Override
- public void clear() {
- for (SimpleParameterList subparam : subparams) {
- subparam.clear();
+ public int getDirection(int i) {
+ return 0;
}
- }
- @Override
- public SimpleParameterList [] getSubparams() {
- return subparams;
- }
-
- @Override
- public void checkAllParametersSet() throws SQLException {
- for (SimpleParameterList subparam : subparams) {
- subparam.checkAllParametersSet();
+ @Override
+ public int getParameterCount() {
+ return total;
}
- }
- @Override
- public byte [][] getEncoding() {
- return null; // unsupported
- }
-
- @Override
- public byte [] getFlags() {
- return null; // unsupported
- }
-
- @Override
- public int [] getParamTypes() {
- return null; // unsupported
- }
-
- @Override
- public Object [] getValues() {
- return null; // unsupported
- }
-
- @Override
- public void appendAll(ParameterList list) throws SQLException {
- // no-op, unsupported
- }
-
- @Override
- public void convertFunctionOutParameters() {
- for (SimpleParameterList subparam : subparams) {
- subparam.convertFunctionOutParameters();
+ @Override
+ public int getInParameterCount() {
+ return total;
}
- }
- private final int total;
- private final SimpleParameterList[] subparams;
- private final int[] offsets;
+ @Override
+ public int getOutParameterCount() {
+ return 0;
+ }
+
+ @Override
+ public int[] getTypeOIDs() {
+ int[] oids = new int[total];
+ for (int i = 0; i < offsets.length; i++) {
+ int[] subOids = subparams[i].getTypeOIDs();
+ System.arraycopy(subOids, 0, oids, offsets[i], subOids.length);
+ }
+ return oids;
+ }
+
+ @Override
+ public void setIntParameter(int index, int value) throws SQLException {
+ int sub = findSubParam(index);
+ subparams[sub].setIntParameter(index - offsets[sub], value);
+ }
+
+ @Override
+ public void setLiteralParameter(int index, String value, int oid) throws SQLException {
+ int sub = findSubParam(index);
+ subparams[sub].setStringParameter(index - offsets[sub], value, oid);
+ }
+
+ @Override
+ public void setStringParameter(int index, String value, int oid) throws SQLException {
+ int sub = findSubParam(index);
+ subparams[sub].setStringParameter(index - offsets[sub], value, oid);
+ }
+
+ @Override
+ public void setBinaryParameter(int index, byte[] value, int oid) throws SQLException {
+ int sub = findSubParam(index);
+ subparams[sub].setBinaryParameter(index - offsets[sub], value, oid);
+ }
+
+ @Override
+ public void setBytea(int index, byte[] data, int offset, int length) throws SQLException {
+ int sub = findSubParam(index);
+ subparams[sub].setBytea(index - offsets[sub], data, offset, length);
+ }
+
+ @Override
+ public void setBytea(int index, InputStream stream, int length) throws SQLException {
+ int sub = findSubParam(index);
+ subparams[sub].setBytea(index - offsets[sub], stream, length);
+ }
+
+ @Override
+ public void setBytea(int index, InputStream stream) throws SQLException {
+ int sub = findSubParam(index);
+ subparams[sub].setBytea(index - offsets[sub], stream);
+ }
+
+ @Override
+ public void setBytea(int index, ByteStreamWriter writer) throws SQLException {
+ int sub = findSubParam(index);
+ subparams[sub].setBytea(index - offsets[sub], writer);
+ }
+
+ @Override
+ public void setText(int index, InputStream stream) throws SQLException {
+ int sub = findSubParam(index);
+ subparams[sub].setText(index - offsets[sub], stream);
+ }
+
+ @Override
+ public void setNull(int index, int oid) throws SQLException {
+ int sub = findSubParam(index);
+ subparams[sub].setNull(index - offsets[sub], oid);
+ }
+
+ @Override
+ public String toString(int index, boolean standardConformingStrings) {
+ try {
+ int sub = findSubParam(index);
+ return subparams[sub].toString(index - offsets[sub], standardConformingStrings);
+ } catch (SQLException e) {
+ throw new IllegalStateException(e.getMessage());
+ }
+ }
+
+ @Override
+ public ParameterList copy() {
+ SimpleParameterList[] copySub = new SimpleParameterList[subparams.length];
+ for (int sub = 0; sub < subparams.length; sub++) {
+ copySub[sub] = (SimpleParameterList) subparams[sub].copy();
+ }
+
+ return new CompositeParameterList(copySub, offsets);
+ }
+
+ @Override
+ public void clear() {
+ for (SimpleParameterList subparam : subparams) {
+ subparam.clear();
+ }
+ }
+
+ @Override
+ public SimpleParameterList[] getSubparams() {
+ return subparams;
+ }
+
+ @Override
+ public void checkAllParametersSet() throws SQLException {
+ for (SimpleParameterList subparam : subparams) {
+ subparam.checkAllParametersSet();
+ }
+ }
+
+ @Override
+ public byte[][] getEncoding() {
+ return null; // unsupported
+ }
+
+ @Override
+ public byte[] getFlags() {
+ return null; // unsupported
+ }
+
+ @Override
+ public int[] getParamTypes() {
+ return null; // unsupported
+ }
+
+ @Override
+ public Object[] getValues() {
+ return null; // unsupported
+ }
+
+ @Override
+ public void appendAll(ParameterList list) throws SQLException {
+ // no-op, unsupported
+ }
+
+ @Override
+ public void convertFunctionOutParameters() {
+ for (SimpleParameterList subparam : subparams) {
+ subparam.convertFunctionOutParameters();
+ }
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/CompositeQuery.java b/pgjdbc/src/main/java/org/postgresql/core/v3/CompositeQuery.java
index bb34876..e0792b2 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/CompositeQuery.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/CompositeQuery.java
@@ -6,12 +6,11 @@
package org.postgresql.core.v3;
+import java.util.Map;
import org.postgresql.core.ParameterList;
import org.postgresql.core.Query;
import org.postgresql.core.SqlCommand;
-import java.util.Map;
-
/**
* V3 Query implementation for queries that involve multiple statements. We split it up into one
* SimpleQuery per statement, and wrap the corresponding per-statement SimpleParameterList objects
@@ -20,92 +19,92 @@ import java.util.Map;
* @author Oliver Jowett (oliver@opencloud.com)
*/
class CompositeQuery implements Query {
- CompositeQuery(SimpleQuery[] subqueries, int[] offsets) {
- this.subqueries = subqueries;
- this.offsets = offsets;
- }
+ private final SimpleQuery[] subqueries;
+ private final int[] offsets;
- @Override
- public ParameterList createParameterList() {
- SimpleParameterList[] subparams = new SimpleParameterList[subqueries.length];
- for (int i = 0; i < subqueries.length; i++) {
- subparams[i] = (SimpleParameterList) subqueries[i].createParameterList();
+ CompositeQuery(SimpleQuery[] subqueries, int[] offsets) {
+ this.subqueries = subqueries;
+ this.offsets = offsets;
}
- return new CompositeParameterList(subparams, offsets);
- }
- @Override
- public String toString(ParameterList parameters) {
- StringBuilder sbuf = new StringBuilder(subqueries[0].toString());
- for (int i = 1; i < subqueries.length; i++) {
- sbuf.append(';');
- sbuf.append(subqueries[i]);
+ @Override
+ public ParameterList createParameterList() {
+ SimpleParameterList[] subparams = new SimpleParameterList[subqueries.length];
+ for (int i = 0; i < subqueries.length; i++) {
+ subparams[i] = (SimpleParameterList) subqueries[i].createParameterList();
+ }
+ return new CompositeParameterList(subparams, offsets);
}
- return sbuf.toString();
- }
- @Override
- public String getNativeSql() {
- StringBuilder sbuf = new StringBuilder(subqueries[0].getNativeSql());
- for (int i = 1; i < subqueries.length; i++) {
- sbuf.append(';');
- sbuf.append(subqueries[i].getNativeSql());
+ @Override
+ public String toString(ParameterList parameters) {
+ StringBuilder sbuf = new StringBuilder(subqueries[0].toString());
+ for (int i = 1; i < subqueries.length; i++) {
+ sbuf.append(';');
+ sbuf.append(subqueries[i]);
+ }
+ return sbuf.toString();
}
- return sbuf.toString();
- }
- @Override
- public SqlCommand getSqlCommand() {
- return null;
- }
-
- @Override
- public String toString() {
- return toString(null);
- }
-
- @Override
- public void close() {
- for (SimpleQuery subquery : subqueries) {
- subquery.close();
+ @Override
+ public String getNativeSql() {
+ StringBuilder sbuf = new StringBuilder(subqueries[0].getNativeSql());
+ for (int i = 1; i < subqueries.length; i++) {
+ sbuf.append(';');
+ sbuf.append(subqueries[i].getNativeSql());
+ }
+ return sbuf.toString();
}
- }
- @Override
- public Query[] getSubqueries() {
- return subqueries;
- }
-
- @Override
- public boolean isStatementDescribed() {
- for (SimpleQuery subquery : subqueries) {
- if (!subquery.isStatementDescribed()) {
- return false;
- }
+ @Override
+ public SqlCommand getSqlCommand() {
+ return null;
}
- return true;
- }
- @Override
- public boolean isEmpty() {
- for (SimpleQuery subquery : subqueries) {
- if (!subquery.isEmpty()) {
- return false;
- }
+ @Override
+ public String toString() {
+ return toString(null);
}
- return true;
- }
- @Override
- public int getBatchSize() {
- return 0; // no-op, unsupported
- }
+ @Override
+ public void close() {
+ for (SimpleQuery subquery : subqueries) {
+ subquery.close();
+ }
+ }
- @Override
- public Map getResultSetColumnNameIndexMap() {
- return null; // unsupported
- }
+ @Override
+ public Query[] getSubqueries() {
+ return subqueries;
+ }
- private final SimpleQuery[] subqueries;
- private final int[] offsets;
+ @Override
+ public boolean isStatementDescribed() {
+ for (SimpleQuery subquery : subqueries) {
+ if (!subquery.isStatementDescribed()) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ @Override
+ public boolean isEmpty() {
+ for (SimpleQuery subquery : subqueries) {
+ if (!subquery.isEmpty()) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ @Override
+ public int getBatchSize() {
+ return 0; // no-op, unsupported
+ }
+
+ @Override
+ public Map getResultSetColumnNameIndexMap() {
+ return null; // unsupported
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/ConnectionFactoryImpl.java b/pgjdbc/src/main/java/org/postgresql/core/v3/ConnectionFactoryImpl.java
index 1815a91..8944451 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/ConnectionFactoryImpl.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/ConnectionFactoryImpl.java
@@ -6,6 +6,22 @@
package org.postgresql.core.v3;
+import java.io.IOException;
+import java.net.ConnectException;
+import java.nio.charset.StandardCharsets;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.TimeZone;
+import java.util.logging.Level;
+import java.util.logging.LogRecord;
+import java.util.logging.Logger;
+import javax.net.SocketFactory;
import org.postgresql.PGProperty;
import org.postgresql.core.ConnectionFactory;
import org.postgresql.core.PGStream;
@@ -35,24 +51,6 @@ import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
import org.postgresql.util.ServerErrorMessage;
-import java.io.IOException;
-import java.net.ConnectException;
-import java.nio.charset.StandardCharsets;
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.TimeZone;
-import java.util.logging.Level;
-import java.util.logging.LogRecord;
-import java.util.logging.Logger;
-
-import javax.net.SocketFactory;
-
/**
* ConnectionFactory implementation for version 3 (7.4+) connections.
*
@@ -60,397 +58,373 @@ import javax.net.SocketFactory;
*/
public class ConnectionFactoryImpl extends ConnectionFactory {
- private static class StartupParam {
- private final String key;
- private final String value;
+ private static final Logger LOGGER = Logger.getLogger(ConnectionFactoryImpl.class.getName());
+ private static final int AUTH_REQ_OK = 0;
+ private static final int AUTH_REQ_KRB4 = 1;
+ private static final int AUTH_REQ_KRB5 = 2;
+ private static final int AUTH_REQ_PASSWORD = 3;
+ private static final int AUTH_REQ_CRYPT = 4;
+ private static final int AUTH_REQ_MD5 = 5;
+ private static final int AUTH_REQ_SCM = 6;
+ private static final int AUTH_REQ_GSS = 7;
+ private static final int AUTH_REQ_GSS_CONTINUE = 8;
+ private static final int AUTH_REQ_SSPI = 9;
+ private static final int AUTH_REQ_SASL = 10;
+ private static final int AUTH_REQ_SASL_CONTINUE = 11;
+ private static final int AUTH_REQ_SASL_FINAL = 12;
+ private static final String IN_HOT_STANDBY = "in_hot_standby";
- StartupParam(String key, String value) {
- this.key = key;
- this.value = value;
+ public ConnectionFactoryImpl() {
+ }
+
+ private static void log(Level level, String msg, Throwable thrown, Object... params) {
+ if (!LOGGER.isLoggable(level)) {
+ return;
+ }
+ LogRecord rec = new LogRecord(level, msg);
+ // Set the loggerName of the LogRecord with the current logger
+ rec.setLoggerName(LOGGER.getName());
+ rec.setParameters(params);
+ rec.setThrown(thrown);
+ LOGGER.log(rec);
+ }
+
+ /**
+ * Convert Java time zone to postgres time zone. All others stay the same except that GMT+nn
+ * changes to GMT-nn and vise versa.
+ * If you provide GMT+/-nn postgres uses POSIX rules which has a positive sign for west of Greenwich
+ * JAVA uses ISO rules which the positive sign is east of Greenwich
+ * To make matters more interesting postgres will always report in ISO
+ *
+ * @return The current JVM time zone in postgresql format.
+ */
+ private static String createPostgresTimeZone() {
+ String tz = TimeZone.getDefault().getID();
+ if (tz.length() <= 3 || !tz.startsWith("GMT")) {
+ return tz;
+ }
+ char sign = tz.charAt(3);
+ String start;
+ switch (sign) {
+ case '+':
+ start = "GMT-";
+ break;
+ case '-':
+ start = "GMT+";
+ break;
+ default:
+ // unknown type
+ return tz;
+ }
+
+ return start + tz.substring(4);
+ }
+
+ private PGStream tryConnect(Properties info, SocketFactory socketFactory, HostSpec hostSpec,
+ SslMode sslMode, GSSEncMode gssEncMode)
+ throws SQLException, IOException {
+ int connectTimeout = PGProperty.CONNECT_TIMEOUT.getInt(info) * 1000;
+ String user = PGProperty.USER.getOrDefault(info);
+ String database = PGProperty.PG_DBNAME.getOrDefault(info);
+ if (user == null) {
+ throw new PSQLException(GT.tr("User cannot be null"), PSQLState.INVALID_NAME);
+ }
+ if (database == null) {
+ throw new PSQLException(GT.tr("Database cannot be null"), PSQLState.INVALID_NAME);
+ }
+
+ PGStream newStream = new PGStream(socketFactory, hostSpec, connectTimeout);
+ try {
+ // Set the socket timeout if the "socketTimeout" property has been set.
+ int socketTimeout = PGProperty.SOCKET_TIMEOUT.getInt(info);
+ if (socketTimeout > 0) {
+ newStream.setNetworkTimeout(socketTimeout * 1000);
+ }
+
+ String maxResultBuffer = PGProperty.MAX_RESULT_BUFFER.getOrDefault(info);
+ newStream.setMaxResultBuffer(maxResultBuffer);
+
+ // Enable TCP keep-alive probe if required.
+ boolean requireTCPKeepAlive = PGProperty.TCP_KEEP_ALIVE.getBoolean(info);
+ newStream.getSocket().setKeepAlive(requireTCPKeepAlive);
+
+ // Enable TCP no delay if required
+ boolean requireTCPNoDelay = PGProperty.TCP_NO_DELAY.getBoolean(info);
+ newStream.getSocket().setTcpNoDelay(requireTCPNoDelay);
+
+ // Try to set SO_SNDBUF and SO_RECVBUF socket options, if requested.
+ // If receiveBufferSize and send_buffer_size are set to a value greater
+ // than 0, adjust. -1 means use the system default, 0 is ignored since not
+ // supported.
+
+ // Set SO_RECVBUF read buffer size
+ int receiveBufferSize = PGProperty.RECEIVE_BUFFER_SIZE.getInt(info);
+ if (receiveBufferSize > -1) {
+ // value of 0 not a valid buffer size value
+ if (receiveBufferSize > 0) {
+ newStream.getSocket().setReceiveBufferSize(receiveBufferSize);
+ } else {
+ LOGGER.log(Level.WARNING, "Ignore invalid value for receiveBufferSize: {0}",
+ receiveBufferSize);
+ }
+ }
+
+ // Set SO_SNDBUF write buffer size
+ int sendBufferSize = PGProperty.SEND_BUFFER_SIZE.getInt(info);
+ if (sendBufferSize > -1) {
+ if (sendBufferSize > 0) {
+ newStream.getSocket().setSendBufferSize(sendBufferSize);
+ } else {
+ LOGGER.log(Level.WARNING, "Ignore invalid value for sendBufferSize: {0}", sendBufferSize);
+ }
+ }
+
+ if (LOGGER.isLoggable(Level.FINE)) {
+ LOGGER.log(Level.FINE, "Receive Buffer Size is {0}",
+ newStream.getSocket().getReceiveBufferSize());
+ LOGGER.log(Level.FINE, "Send Buffer Size is {0}",
+ newStream.getSocket().getSendBufferSize());
+ }
+
+ newStream = enableGSSEncrypted(newStream, gssEncMode, hostSpec.getHost(), info, connectTimeout);
+
+ // if we have a security context then gss negotiation succeeded. Do not attempt SSL
+ // negotiation
+ if (!newStream.isGssEncrypted()) {
+ // Construct and send an ssl startup packet if requested.
+ newStream = enableSSL(newStream, sslMode, info, connectTimeout);
+ }
+
+ // Make sure to set network timeout again, in case the stream changed due to GSS or SSL
+ if (socketTimeout > 0) {
+ newStream.setNetworkTimeout(socketTimeout * 1000);
+ }
+
+ List paramList = getParametersForStartup(user, database, info);
+ sendStartupPacket(newStream, paramList);
+
+ // Do authentication (until AuthenticationOk).
+ doAuthentication(newStream, hostSpec.getHost(), user, info);
+
+ return newStream;
+ } catch (Exception e) {
+ closeStream(newStream);
+ throw e;
+ }
}
@Override
- public String toString() {
- return this.key + "=" + this.value;
- }
+ public QueryExecutor openConnectionImpl(HostSpec[] hostSpecs, Properties info) throws SQLException {
+ SslMode sslMode = SslMode.of(info);
+ GSSEncMode gssEncMode = GSSEncMode.of(info);
- public byte[] getEncodedKey() {
- return this.key.getBytes(StandardCharsets.UTF_8);
- }
-
- public byte[] getEncodedValue() {
- return this.value.getBytes(StandardCharsets.UTF_8);
- }
- }
-
- private static final Logger LOGGER = Logger.getLogger(ConnectionFactoryImpl.class.getName());
- private static final int AUTH_REQ_OK = 0;
- private static final int AUTH_REQ_KRB4 = 1;
- private static final int AUTH_REQ_KRB5 = 2;
- private static final int AUTH_REQ_PASSWORD = 3;
- private static final int AUTH_REQ_CRYPT = 4;
- private static final int AUTH_REQ_MD5 = 5;
- private static final int AUTH_REQ_SCM = 6;
- private static final int AUTH_REQ_GSS = 7;
- private static final int AUTH_REQ_GSS_CONTINUE = 8;
- private static final int AUTH_REQ_SSPI = 9;
- private static final int AUTH_REQ_SASL = 10;
- private static final int AUTH_REQ_SASL_CONTINUE = 11;
- private static final int AUTH_REQ_SASL_FINAL = 12;
-
- private static final String IN_HOT_STANDBY = "in_hot_standby";
-
- public ConnectionFactoryImpl() {
- }
-
- private PGStream tryConnect(Properties info, SocketFactory socketFactory, HostSpec hostSpec,
- SslMode sslMode, GSSEncMode gssEncMode)
- throws SQLException, IOException {
- int connectTimeout = PGProperty.CONNECT_TIMEOUT.getInt(info) * 1000;
- String user = PGProperty.USER.getOrDefault(info);
- String database = PGProperty.PG_DBNAME.getOrDefault(info);
- if (user == null) {
- throw new PSQLException(GT.tr("User cannot be null"), PSQLState.INVALID_NAME);
- }
- if (database == null) {
- throw new PSQLException(GT.tr("Database cannot be null"), PSQLState.INVALID_NAME);
- }
-
- PGStream newStream = new PGStream(socketFactory, hostSpec, connectTimeout);
- try {
- // Set the socket timeout if the "socketTimeout" property has been set.
- int socketTimeout = PGProperty.SOCKET_TIMEOUT.getInt(info);
- if (socketTimeout > 0) {
- newStream.setNetworkTimeout(socketTimeout * 1000);
- }
-
- String maxResultBuffer = PGProperty.MAX_RESULT_BUFFER.getOrDefault(info);
- newStream.setMaxResultBuffer(maxResultBuffer);
-
- // Enable TCP keep-alive probe if required.
- boolean requireTCPKeepAlive = PGProperty.TCP_KEEP_ALIVE.getBoolean(info);
- newStream.getSocket().setKeepAlive(requireTCPKeepAlive);
-
- // Enable TCP no delay if required
- boolean requireTCPNoDelay = PGProperty.TCP_NO_DELAY.getBoolean(info);
- newStream.getSocket().setTcpNoDelay(requireTCPNoDelay);
-
- // Try to set SO_SNDBUF and SO_RECVBUF socket options, if requested.
- // If receiveBufferSize and send_buffer_size are set to a value greater
- // than 0, adjust. -1 means use the system default, 0 is ignored since not
- // supported.
-
- // Set SO_RECVBUF read buffer size
- int receiveBufferSize = PGProperty.RECEIVE_BUFFER_SIZE.getInt(info);
- if (receiveBufferSize > -1) {
- // value of 0 not a valid buffer size value
- if (receiveBufferSize > 0) {
- newStream.getSocket().setReceiveBufferSize(receiveBufferSize);
- } else {
- LOGGER.log(Level.WARNING, "Ignore invalid value for receiveBufferSize: {0}",
- receiveBufferSize);
- }
- }
-
- // Set SO_SNDBUF write buffer size
- int sendBufferSize = PGProperty.SEND_BUFFER_SIZE.getInt(info);
- if (sendBufferSize > -1) {
- if (sendBufferSize > 0) {
- newStream.getSocket().setSendBufferSize(sendBufferSize);
- } else {
- LOGGER.log(Level.WARNING, "Ignore invalid value for sendBufferSize: {0}", sendBufferSize);
- }
- }
-
- if (LOGGER.isLoggable(Level.FINE)) {
- LOGGER.log(Level.FINE, "Receive Buffer Size is {0}",
- newStream.getSocket().getReceiveBufferSize());
- LOGGER.log(Level.FINE, "Send Buffer Size is {0}",
- newStream.getSocket().getSendBufferSize());
- }
-
- newStream = enableGSSEncrypted(newStream, gssEncMode, hostSpec.getHost(), info, connectTimeout);
-
- // if we have a security context then gss negotiation succeeded. Do not attempt SSL
- // negotiation
- if (!newStream.isGssEncrypted()) {
- // Construct and send an ssl startup packet if requested.
- newStream = enableSSL(newStream, sslMode, info, connectTimeout);
- }
-
- // Make sure to set network timeout again, in case the stream changed due to GSS or SSL
- if (socketTimeout > 0) {
- newStream.setNetworkTimeout(socketTimeout * 1000);
- }
-
- List paramList = getParametersForStartup(user, database, info);
- sendStartupPacket(newStream, paramList);
-
- // Do authentication (until AuthenticationOk).
- doAuthentication(newStream, hostSpec.getHost(), user, info);
-
- return newStream;
- } catch (Exception e) {
- closeStream(newStream);
- throw e;
- }
- }
-
- @Override
- public QueryExecutor openConnectionImpl(HostSpec[] hostSpecs, Properties info) throws SQLException {
- SslMode sslMode = SslMode.of(info);
- GSSEncMode gssEncMode = GSSEncMode.of(info);
-
- HostRequirement targetServerType;
- String targetServerTypeStr = PGProperty.TARGET_SERVER_TYPE.getOrDefault(info);
- try {
- targetServerType = HostRequirement.getTargetServerType(targetServerTypeStr);
- } catch (IllegalArgumentException ex) {
- throw new PSQLException(
- GT.tr("Invalid targetServerType value: {0}", targetServerTypeStr),
- PSQLState.CONNECTION_UNABLE_TO_CONNECT);
- }
-
- SocketFactory socketFactory = SocketFactoryFactory.getSocketFactory(info);
-
- HostChooser hostChooser =
- HostChooserFactory.createHostChooser(hostSpecs, targetServerType, info);
- Iterator hostIter = hostChooser.iterator();
- Map knownStates = new HashMap<>();
- while (hostIter.hasNext()) {
- CandidateHost candidateHost = hostIter.next();
- HostSpec hostSpec = candidateHost.hostSpec;
- LOGGER.log(Level.FINE, "Trying to establish a protocol version 3 connection to {0}", hostSpec);
-
- // Note: per-connect-attempt status map is used here instead of GlobalHostStatusTracker
- // for the case when "no good hosts" match (e.g. all the hosts are known as "connectfail")
- // In that case, the system tries to connect to each host in order, thus it should not look into
- // GlobalHostStatusTracker
- HostStatus knownStatus = knownStates.get(hostSpec);
- if (knownStatus != null && !candidateHost.targetServerType.allowConnectingTo(knownStatus)) {
- if (LOGGER.isLoggable(Level.FINER)) {
- LOGGER.log(Level.FINER, "Known status of host {0} is {1}, and required status was {2}. Will try next host",
- new Object[]{hostSpec, knownStatus, candidateHost.targetServerType});
- }
- continue;
- }
-
- //
- // Establish a connection.
- //
-
- PGStream newStream = null;
- try {
+ HostRequirement targetServerType;
+ String targetServerTypeStr = PGProperty.TARGET_SERVER_TYPE.getOrDefault(info);
try {
- newStream = tryConnect(info, socketFactory, hostSpec, sslMode, gssEncMode);
- } catch (SQLException e) {
- if (sslMode == SslMode.PREFER
- && PSQLState.INVALID_AUTHORIZATION_SPECIFICATION.getState().equals(e.getSQLState())) {
- // Try non-SSL connection to cover case like "non-ssl only db"
- // Note: PREFER allows loss of encryption, so no significant harm is made
- Throwable ex = null;
+ targetServerType = HostRequirement.getTargetServerType(targetServerTypeStr);
+ } catch (IllegalArgumentException ex) {
+ throw new PSQLException(
+ GT.tr("Invalid targetServerType value: {0}", targetServerTypeStr),
+ PSQLState.CONNECTION_UNABLE_TO_CONNECT);
+ }
+
+ SocketFactory socketFactory = SocketFactoryFactory.getSocketFactory(info);
+
+ HostChooser hostChooser =
+ HostChooserFactory.createHostChooser(hostSpecs, targetServerType, info);
+ Iterator hostIter = hostChooser.iterator();
+ Map knownStates = new HashMap<>();
+ while (hostIter.hasNext()) {
+ CandidateHost candidateHost = hostIter.next();
+ HostSpec hostSpec = candidateHost.hostSpec;
+ LOGGER.log(Level.FINE, "Trying to establish a protocol version 3 connection to {0}", hostSpec);
+
+ // Note: per-connect-attempt status map is used here instead of GlobalHostStatusTracker
+ // for the case when "no good hosts" match (e.g. all the hosts are known as "connectfail")
+ // In that case, the system tries to connect to each host in order, thus it should not look into
+ // GlobalHostStatusTracker
+ HostStatus knownStatus = knownStates.get(hostSpec);
+ if (knownStatus != null && !candidateHost.targetServerType.allowConnectingTo(knownStatus)) {
+ if (LOGGER.isLoggable(Level.FINER)) {
+ LOGGER.log(Level.FINER, "Known status of host {0} is {1}, and required status was {2}. Will try next host",
+ new Object[]{hostSpec, knownStatus, candidateHost.targetServerType});
+ }
+ continue;
+ }
+
+ //
+ // Establish a connection.
+ //
+
+ PGStream newStream = null;
try {
- newStream =
- tryConnect(info, socketFactory, hostSpec, SslMode.DISABLE, gssEncMode);
- LOGGER.log(Level.FINE, "Downgraded to non-encrypted connection for host {0}",
- hostSpec);
- } catch (SQLException | IOException ee) {
- ex = ee;
+ try {
+ newStream = tryConnect(info, socketFactory, hostSpec, sslMode, gssEncMode);
+ } catch (SQLException e) {
+ if (sslMode == SslMode.PREFER
+ && PSQLState.INVALID_AUTHORIZATION_SPECIFICATION.getState().equals(e.getSQLState())) {
+ // Try non-SSL connection to cover case like "non-ssl only db"
+ // Note: PREFER allows loss of encryption, so no significant harm is made
+ Throwable ex = null;
+ try {
+ newStream =
+ tryConnect(info, socketFactory, hostSpec, SslMode.DISABLE, gssEncMode);
+ LOGGER.log(Level.FINE, "Downgraded to non-encrypted connection for host {0}",
+ hostSpec);
+ } catch (SQLException | IOException ee) {
+ ex = ee;
+ }
+
+ if (ex != null) {
+ log(Level.FINE, "sslMode==PREFER, however non-SSL connection failed as well", ex);
+ // non-SSL failed as well, so re-throw original exception
+ // Add non-SSL exception as suppressed
+ e.addSuppressed(ex);
+ throw e;
+ }
+ } else if (sslMode == SslMode.ALLOW
+ && PSQLState.INVALID_AUTHORIZATION_SPECIFICATION.getState().equals(e.getSQLState())) {
+ // Try using SSL
+ Throwable ex = null;
+ try {
+ newStream =
+ tryConnect(info, socketFactory, hostSpec, SslMode.REQUIRE, gssEncMode);
+ LOGGER.log(Level.FINE, "Upgraded to encrypted connection for host {0}",
+ hostSpec);
+ } catch (SQLException ee) {
+ ex = ee;
+ } catch (IOException ee) {
+ ex = ee; // Can't use multi-catch in Java 6 :(
+ }
+ if (ex != null) {
+ log(Level.FINE, "sslMode==ALLOW, however SSL connection failed as well", ex);
+ // SSL connection failed as well, so re-throw original exception
+ // Add SSL exception as suppressed
+ e.addSuppressed(ex);
+ throw e;
+ }
+
+ } else {
+ throw e;
+ }
+ }
+
+ int cancelSignalTimeout = PGProperty.CANCEL_SIGNAL_TIMEOUT.getInt(info) * 1000;
+
+ // Do final startup.
+ QueryExecutor queryExecutor = new QueryExecutorImpl(newStream, cancelSignalTimeout, info);
+
+ // Check Primary or Secondary
+ HostStatus hostStatus = HostStatus.ConnectOK;
+ if (candidateHost.targetServerType != HostRequirement.any) {
+ hostStatus = isPrimary(queryExecutor) ? HostStatus.Primary : HostStatus.Secondary;
+ }
+ GlobalHostStatusTracker.reportHostStatus(hostSpec, hostStatus);
+ knownStates.put(hostSpec, hostStatus);
+ if (!candidateHost.targetServerType.allowConnectingTo(hostStatus)) {
+ queryExecutor.close();
+ continue;
+ }
+
+ runInitialQueries(queryExecutor, info);
+
+ // And we're done.
+ return queryExecutor;
+ } catch (ConnectException cex) {
+ // Added by Peter Mount
+ // ConnectException is thrown when the connection cannot be made.
+ // we trap this and return a more meaningful message for the end user
+ GlobalHostStatusTracker.reportHostStatus(hostSpec, HostStatus.ConnectFail);
+ knownStates.put(hostSpec, HostStatus.ConnectFail);
+ if (hostIter.hasNext()) {
+ log(Level.FINE, "ConnectException occurred while connecting to {0}", cex, hostSpec);
+ // still more addresses to try
+ continue;
+ }
+ throw new PSQLException(GT.tr(
+ "Connection to {0} refused. Check that the hostname and port are correct and that the postmaster is accepting TCP/IP connections.",
+ hostSpec), PSQLState.CONNECTION_UNABLE_TO_CONNECT, cex);
+ } catch (IOException ioe) {
+ closeStream(newStream);
+ GlobalHostStatusTracker.reportHostStatus(hostSpec, HostStatus.ConnectFail);
+ knownStates.put(hostSpec, HostStatus.ConnectFail);
+ if (hostIter.hasNext()) {
+ log(Level.FINE, "IOException occurred while connecting to {0}", ioe, hostSpec);
+ // still more addresses to try
+ continue;
+ }
+ throw new PSQLException(GT.tr("The connection attempt failed."),
+ PSQLState.CONNECTION_UNABLE_TO_CONNECT, ioe);
+ } catch (SQLException se) {
+ closeStream(newStream);
+ GlobalHostStatusTracker.reportHostStatus(hostSpec, HostStatus.ConnectFail);
+ knownStates.put(hostSpec, HostStatus.ConnectFail);
+ if (hostIter.hasNext()) {
+ log(Level.FINE, "SQLException occurred while connecting to {0}", se, hostSpec);
+ // still more addresses to try
+ continue;
+ }
+ throw se;
}
+ }
+ throw new PSQLException(GT
+ .tr("Could not find a server with specified targetServerType: {0}", targetServerType),
+ PSQLState.CONNECTION_UNABLE_TO_CONNECT);
+ }
- if (ex != null) {
- log(Level.FINE, "sslMode==PREFER, however non-SSL connection failed as well", ex);
- // non-SSL failed as well, so re-throw original exception
- // Add non-SSL exception as suppressed
- e.addSuppressed(ex);
- throw e;
+ private List getParametersForStartup(String user, String database, Properties info) {
+ List paramList = new ArrayList<>();
+ paramList.add(new StartupParam("user", user));
+ paramList.add(new StartupParam("database", database));
+ paramList.add(new StartupParam("client_encoding", "UTF8"));
+ paramList.add(new StartupParam("DateStyle", "ISO"));
+ paramList.add(new StartupParam("TimeZone", createPostgresTimeZone()));
+
+ Version assumeVersion = ServerVersion.from(PGProperty.ASSUME_MIN_SERVER_VERSION.getOrDefault(info));
+
+ if (assumeVersion.getVersionNum() >= ServerVersion.v9_0.getVersionNum()) {
+ // User is explicitly telling us this is a 9.0+ server so set properties here:
+ paramList.add(new StartupParam("extra_float_digits", "3"));
+ String appName = PGProperty.APPLICATION_NAME.getOrDefault(info);
+ if (appName != null) {
+ paramList.add(new StartupParam("application_name", appName));
}
- } else if (sslMode == SslMode.ALLOW
- && PSQLState.INVALID_AUTHORIZATION_SPECIFICATION.getState().equals(e.getSQLState())) {
- // Try using SSL
- Throwable ex = null;
- try {
- newStream =
- tryConnect(info, socketFactory, hostSpec, SslMode.REQUIRE, gssEncMode);
- LOGGER.log(Level.FINE, "Upgraded to encrypted connection for host {0}",
- hostSpec);
- } catch (SQLException ee) {
- ex = ee;
- } catch (IOException ee) {
- ex = ee; // Can't use multi-catch in Java 6 :(
- }
- if (ex != null) {
- log(Level.FINE, "sslMode==ALLOW, however SSL connection failed as well", ex);
- // non-SSL failed as well, so re-throw original exception
- // Add SSL exception as suppressed
- e.addSuppressed(ex);
- throw e;
- }
-
- } else {
- throw e;
- }
+ } else {
+ // User has not explicitly told us that this is a 9.0+ server so stick to old default:
+ paramList.add(new StartupParam("extra_float_digits", "2"));
}
- int cancelSignalTimeout = PGProperty.CANCEL_SIGNAL_TIMEOUT.getInt(info) * 1000;
-
- // Do final startup.
- QueryExecutor queryExecutor = new QueryExecutorImpl(newStream, cancelSignalTimeout, info);
-
- // Check Primary or Secondary
- HostStatus hostStatus = HostStatus.ConnectOK;
- if (candidateHost.targetServerType != HostRequirement.any) {
- hostStatus = isPrimary(queryExecutor) ? HostStatus.Primary : HostStatus.Secondary;
- }
- GlobalHostStatusTracker.reportHostStatus(hostSpec, hostStatus);
- knownStates.put(hostSpec, hostStatus);
- if (!candidateHost.targetServerType.allowConnectingTo(hostStatus)) {
- queryExecutor.close();
- continue;
+ String replication = PGProperty.REPLICATION.getOrDefault(info);
+ if (replication != null && assumeVersion.getVersionNum() >= ServerVersion.v9_4.getVersionNum()) {
+ paramList.add(new StartupParam("replication", replication));
}
- runInitialQueries(queryExecutor, info);
-
- // And we're done.
- return queryExecutor;
- } catch (ConnectException cex) {
- // Added by Peter Mount
- // ConnectException is thrown when the connection cannot be made.
- // we trap this an return a more meaningful message for the end user
- GlobalHostStatusTracker.reportHostStatus(hostSpec, HostStatus.ConnectFail);
- knownStates.put(hostSpec, HostStatus.ConnectFail);
- if (hostIter.hasNext()) {
- log(Level.FINE, "ConnectException occurred while connecting to {0}", cex, hostSpec);
- // still more addresses to try
- continue;
+ String currentSchema = PGProperty.CURRENT_SCHEMA.getOrDefault(info);
+ if (currentSchema != null) {
+ paramList.add(new StartupParam("search_path", currentSchema));
}
- throw new PSQLException(GT.tr(
- "Connection to {0} refused. Check that the hostname and port are correct and that the postmaster is accepting TCP/IP connections.",
- hostSpec), PSQLState.CONNECTION_UNABLE_TO_CONNECT, cex);
- } catch (IOException ioe) {
- closeStream(newStream);
- GlobalHostStatusTracker.reportHostStatus(hostSpec, HostStatus.ConnectFail);
- knownStates.put(hostSpec, HostStatus.ConnectFail);
- if (hostIter.hasNext()) {
- log(Level.FINE, "IOException occurred while connecting to {0}", ioe, hostSpec);
- // still more addresses to try
- continue;
+
+ String options = PGProperty.OPTIONS.getOrDefault(info);
+ if (options != null) {
+ paramList.add(new StartupParam("options", options));
}
- throw new PSQLException(GT.tr("The connection attempt failed."),
- PSQLState.CONNECTION_UNABLE_TO_CONNECT, ioe);
- } catch (SQLException se) {
- closeStream(newStream);
- GlobalHostStatusTracker.reportHostStatus(hostSpec, HostStatus.ConnectFail);
- knownStates.put(hostSpec, HostStatus.ConnectFail);
- if (hostIter.hasNext()) {
- log(Level.FINE, "SQLException occurred while connecting to {0}", se, hostSpec);
- // still more addresses to try
- continue;
+
+ return paramList;
+ }
+
+ @SuppressWarnings("fallthrough")
+ private PGStream enableGSSEncrypted(PGStream pgStream, GSSEncMode gssEncMode, String host, Properties info,
+ int connectTimeout)
+ throws IOException, PSQLException {
+
+ if (gssEncMode == GSSEncMode.DISABLE) {
+ return pgStream;
}
- throw se;
- }
- }
- throw new PSQLException(GT
- .tr("Could not find a server with specified targetServerType: {0}", targetServerType),
- PSQLState.CONNECTION_UNABLE_TO_CONNECT);
- }
- private List getParametersForStartup(String user, String database, Properties info) {
- List paramList = new ArrayList<>();
- paramList.add(new StartupParam("user", user));
- paramList.add(new StartupParam("database", database));
- paramList.add(new StartupParam("client_encoding", "UTF8"));
- paramList.add(new StartupParam("DateStyle", "ISO"));
- paramList.add(new StartupParam("TimeZone", createPostgresTimeZone()));
-
- Version assumeVersion = ServerVersion.from(PGProperty.ASSUME_MIN_SERVER_VERSION.getOrDefault(info));
-
- if (assumeVersion.getVersionNum() >= ServerVersion.v9_0.getVersionNum()) {
- // User is explicitly telling us this is a 9.0+ server so set properties here:
- paramList.add(new StartupParam("extra_float_digits", "3"));
- String appName = PGProperty.APPLICATION_NAME.getOrDefault(info);
- if (appName != null) {
- paramList.add(new StartupParam("application_name", appName));
- }
- } else {
- // User has not explicitly told us that this is a 9.0+ server so stick to old default:
- paramList.add(new StartupParam("extra_float_digits", "2"));
- }
-
- String replication = PGProperty.REPLICATION.getOrDefault(info);
- if (replication != null && assumeVersion.getVersionNum() >= ServerVersion.v9_4.getVersionNum()) {
- paramList.add(new StartupParam("replication", replication));
- }
-
- String currentSchema = PGProperty.CURRENT_SCHEMA.getOrDefault(info);
- if (currentSchema != null) {
- paramList.add(new StartupParam("search_path", currentSchema));
- }
-
- String options = PGProperty.OPTIONS.getOrDefault(info);
- if (options != null) {
- paramList.add(new StartupParam("options", options));
- }
-
- return paramList;
- }
-
- private static void log(Level level, String msg, Throwable thrown, Object... params) {
- if (!LOGGER.isLoggable(level)) {
- return;
- }
- LogRecord rec = new LogRecord(level, msg);
- // Set the loggerName of the LogRecord with the current logger
- rec.setLoggerName(LOGGER.getName());
- rec.setParameters(params);
- rec.setThrown(thrown);
- LOGGER.log(rec);
- }
-
- /**
- * Convert Java time zone to postgres time zone. All others stay the same except that GMT+nn
- * changes to GMT-nn and vise versa.
- * If you provide GMT+/-nn postgres uses POSIX rules which has a positive sign for west of Greenwich
- * JAVA uses ISO rules which the positive sign is east of Greenwich
- * To make matters more interesting postgres will always report in ISO
- *
- * @return The current JVM time zone in postgresql format.
- */
- private static String createPostgresTimeZone() {
- String tz = TimeZone.getDefault().getID();
- if (tz.length() <= 3 || !tz.startsWith("GMT")) {
- return tz;
- }
- char sign = tz.charAt(3);
- String start;
- switch (sign) {
- case '+':
- start = "GMT-";
- break;
- case '-':
- start = "GMT+";
- break;
- default:
- // unknown type
- return tz;
- }
-
- return start + tz.substring(4);
- }
-
- @SuppressWarnings("fallthrough")
- private PGStream enableGSSEncrypted(PGStream pgStream, GSSEncMode gssEncMode, String host, Properties info,
- int connectTimeout)
- throws IOException, PSQLException {
-
- if ( gssEncMode == GSSEncMode.DISABLE ) {
- return pgStream;
- }
-
- if (gssEncMode == GSSEncMode.ALLOW ) {
- // start with plain text and let the server request it
- return pgStream;
- }
+ if (gssEncMode == GSSEncMode.ALLOW) {
+ // start with plain text and let the server request it
+ return pgStream;
+ }
/*
at this point gssEncMode is either PREFER or REQUIRE
@@ -462,446 +436,468 @@ public class ConnectionFactoryImpl extends ConnectionFactory {
/*
let's see if the server will allow a GSS encrypted connection
*/
- String user = PGProperty.USER.getOrDefault(info);
- if (user == null) {
- throw new PSQLException("GSSAPI encryption required but was impossible user is null", PSQLState.CONNECTION_REJECTED);
- }
-
- // attempt to acquire a GSS encrypted connection
- LOGGER.log(Level.FINEST, " FE=> GSSENCRequest");
-
- int gssTimeout = PGProperty.SSL_RESPONSE_TIMEOUT.getInt(info);
- int currentTimeout = pgStream.getNetworkTimeout();
-
- // if the current timeout is less than sslTimeout then
- // use the smaller timeout. We could do something tricky
- // here to not set it in that case but this is pretty readable
- if (currentTimeout > 0 && currentTimeout < gssTimeout) {
- gssTimeout = currentTimeout;
- }
-
- pgStream.setNetworkTimeout(gssTimeout);
-
- // Send GSSEncryption request packet
- pgStream.sendInteger4(8);
- pgStream.sendInteger2(1234);
- pgStream.sendInteger2(5680);
- pgStream.flush();
- // Now get the response from the backend, one of N, E, S.
- int beresp = pgStream.receiveChar();
- pgStream.setNetworkTimeout(currentTimeout);
- switch (beresp) {
- case 'E':
- LOGGER.log(Level.FINEST, " <=BE GSSEncrypted Error");
-
- // Server doesn't even know about the SSL handshake protocol
- if (gssEncMode.requireEncryption()) {
- throw new PSQLException(GT.tr("The server does not support GSS Encoding."),
- PSQLState.CONNECTION_REJECTED);
+ String user = PGProperty.USER.getOrDefault(info);
+ if (user == null) {
+ throw new PSQLException("GSSAPI encryption required but was impossible user is null", PSQLState.CONNECTION_REJECTED);
}
- // We have to reconnect to continue.
- pgStream.close();
- return new PGStream(pgStream.getSocketFactory(), pgStream.getHostSpec(), connectTimeout);
+ // attempt to acquire a GSS encrypted connection
+ LOGGER.log(Level.FINEST, " FE=> GSSENCRequest");
- case 'N':
- LOGGER.log(Level.FINEST, " <=BE GSSEncrypted Refused");
+ int gssTimeout = PGProperty.SSL_RESPONSE_TIMEOUT.getInt(info);
+ int currentTimeout = pgStream.getNetworkTimeout();
- // Server does not support gss encryption
- if (gssEncMode.requireEncryption()) {
- throw new PSQLException(GT.tr("The server does not support GSS Encryption."),
- PSQLState.CONNECTION_REJECTED);
+ // if the current timeout is less than sslTimeout then
+ // use the smaller timeout. We could do something tricky
+ // here to not set it in that case but this is pretty readable
+ if (currentTimeout > 0 && currentTimeout < gssTimeout) {
+ gssTimeout = currentTimeout;
}
- return pgStream;
+ pgStream.setNetworkTimeout(gssTimeout);
- case 'G':
- LOGGER.log(Level.FINEST, " <=BE GSSEncryptedOk");
- try {
- AuthenticationPluginManager.withPassword(AuthenticationRequestType.GSS, info, password -> {
- MakeGSS.authenticate(true, pgStream, host, user, password,
- PGProperty.JAAS_APPLICATION_NAME.getOrDefault(info),
- PGProperty.KERBEROS_SERVER_NAME.getOrDefault(info), false, // TODO: fix this
- PGProperty.JAAS_LOGIN.getBoolean(info),
- PGProperty.LOG_SERVER_ERROR_DETAIL.getBoolean(info));
- return void.class;
- });
- return pgStream;
- } catch (PSQLException ex) {
- // allow the connection to proceed
- if (gssEncMode == GSSEncMode.PREFER) {
- // we have to reconnect to continue
- return new PGStream(pgStream, connectTimeout);
- }
- }
- // fallthrough
-
- default:
- throw new PSQLException(GT.tr("An error occurred while setting up the GSS Encoded connection."),
- PSQLState.PROTOCOL_VIOLATION);
- }
- }
-
- private PGStream enableSSL(PGStream pgStream, SslMode sslMode, Properties info,
- int connectTimeout)
- throws IOException, PSQLException {
- if (sslMode == SslMode.DISABLE) {
- return pgStream;
- }
- if (sslMode == SslMode.ALLOW) {
- // Allow ==> start with plaintext, use encryption if required by server
- return pgStream;
- }
-
- LOGGER.log(Level.FINEST, " FE=> SSLRequest");
-
- int sslTimeout = PGProperty.SSL_RESPONSE_TIMEOUT.getInt(info);
- int currentTimeout = pgStream.getNetworkTimeout();
-
- // if the current timeout is less than sslTimeout then
- // use the smaller timeout. We could do something tricky
- // here to not set it in that case but this is pretty readable
- if (currentTimeout > 0 && currentTimeout < sslTimeout) {
- sslTimeout = currentTimeout;
- }
-
- pgStream.setNetworkTimeout(sslTimeout);
- // Send SSL request packet
- pgStream.sendInteger4(8);
- pgStream.sendInteger2(1234);
- pgStream.sendInteger2(5679);
- pgStream.flush();
-
- // Now get the response from the backend, one of N, E, S.
- int beresp = pgStream.receiveChar();
- pgStream.setNetworkTimeout(currentTimeout);
-
- switch (beresp) {
- case 'E':
- LOGGER.log(Level.FINEST, " <=BE SSLError");
-
- // Server doesn't even know about the SSL handshake protocol
- if (sslMode.requireEncryption()) {
- throw new PSQLException(GT.tr("The server does not support SSL."),
- PSQLState.CONNECTION_REJECTED);
- }
-
- // We have to reconnect to continue.
- return new PGStream(pgStream, connectTimeout);
-
- case 'N':
- LOGGER.log(Level.FINEST, " <=BE SSLRefused");
-
- // Server does not support ssl
- if (sslMode.requireEncryption()) {
- throw new PSQLException(GT.tr("The server does not support SSL."),
- PSQLState.CONNECTION_REJECTED);
- }
-
- return pgStream;
-
- case 'S':
- LOGGER.log(Level.FINEST, " <=BE SSLOk");
-
- // Server supports ssl
- MakeSSL.convert(pgStream, info);
- return pgStream;
-
- default:
- throw new PSQLException(GT.tr("An error occurred while setting up the SSL connection."),
- PSQLState.PROTOCOL_VIOLATION);
- }
- }
-
- private void sendStartupPacket(PGStream pgStream, List params)
- throws IOException {
- if (LOGGER.isLoggable(Level.FINEST)) {
- StringBuilder details = new StringBuilder();
- for (int i = 0; i < params.size(); i++) {
- if (i != 0) {
- details.append(", ");
- }
- details.append(params.get(i).toString());
- }
- LOGGER.log(Level.FINEST, " FE=> StartupPacket({0})", details);
- }
-
- // Precalculate message length and encode params.
- int length = 4 + 4;
- byte[][] encodedParams = new byte[params.size() * 2][];
- for (int i = 0; i < params.size(); i++) {
- encodedParams[i * 2] = params.get(i).getEncodedKey();
- encodedParams[i * 2 + 1] = params.get(i).getEncodedValue();
- length += encodedParams[i * 2].length + 1 + encodedParams[i * 2 + 1].length + 1;
- }
-
- length += 1; // Terminating \0
-
- // Send the startup message.
- pgStream.sendInteger4(length);
- pgStream.sendInteger2(3); // protocol major
- pgStream.sendInteger2(0); // protocol minor
- for (byte[] encodedParam : encodedParams) {
- pgStream.send(encodedParam);
- pgStream.sendChar(0);
- }
-
- pgStream.sendChar(0);
- pgStream.flush();
- }
-
- private void doAuthentication(PGStream pgStream, String host, String user, Properties info) throws IOException, SQLException {
- // Now get the response from the backend, either an error message
- // or an authentication request
-
- /* SCRAM authentication state, if used */
- ScramAuthenticator scramAuthenticator = null;
-
- authloop:
- while (true) {
+ // Send GSSEncryption request packet
+ pgStream.sendInteger4(8);
+ pgStream.sendInteger2(1234);
+ pgStream.sendInteger2(5680);
+ pgStream.flush();
+ // Now get the response from the backend, one of N, E, S.
int beresp = pgStream.receiveChar();
+ pgStream.setNetworkTimeout(currentTimeout);
+ switch (beresp) {
+ case 'E':
+ LOGGER.log(Level.FINEST, " <=BE GSSEncrypted Error");
+
+ // Server doesn't even know about the SSL handshake protocol
+ if (gssEncMode.requireEncryption()) {
+ throw new PSQLException(GT.tr("The server does not support GSS Encoding."),
+ PSQLState.CONNECTION_REJECTED);
+ }
+
+ // We have to reconnect to continue.
+ pgStream.close();
+ return new PGStream(pgStream.getSocketFactory(), pgStream.getHostSpec(), connectTimeout);
+
+ case 'N':
+ LOGGER.log(Level.FINEST, " <=BE GSSEncrypted Refused");
+
+ // Server does not support gss encryption
+ if (gssEncMode.requireEncryption()) {
+ throw new PSQLException(GT.tr("The server does not support GSS Encryption."),
+ PSQLState.CONNECTION_REJECTED);
+ }
+
+ return pgStream;
+
+ case 'G':
+ LOGGER.log(Level.FINEST, " <=BE GSSEncryptedOk");
+ try {
+ AuthenticationPluginManager.withPassword(AuthenticationRequestType.GSS, info, password -> {
+ MakeGSS.authenticate(true, pgStream, host, user, password,
+ PGProperty.JAAS_APPLICATION_NAME.getOrDefault(info),
+ PGProperty.KERBEROS_SERVER_NAME.getOrDefault(info), false, // TODO: fix this
+ PGProperty.JAAS_LOGIN.getBoolean(info),
+ PGProperty.LOG_SERVER_ERROR_DETAIL.getBoolean(info));
+ return void.class;
+ });
+ return pgStream;
+ } catch (PSQLException ex) {
+ // allow the connection to proceed
+ if (gssEncMode == GSSEncMode.PREFER) {
+ // we have to reconnect to continue
+ return new PGStream(pgStream, connectTimeout);
+ }
+ }
+ // fallthrough
+
+ default:
+ throw new PSQLException(GT.tr("An error occurred while setting up the GSS Encoded connection."),
+ PSQLState.PROTOCOL_VIOLATION);
+ }
+ }
+
+ private PGStream enableSSL(PGStream pgStream, SslMode sslMode, Properties info,
+ int connectTimeout)
+ throws IOException, PSQLException {
+ if (sslMode == SslMode.DISABLE) {
+ return pgStream;
+ }
+ if (sslMode == SslMode.ALLOW) {
+ // Allow ==> start with plaintext, use encryption if required by server
+ return pgStream;
+ }
+
+ LOGGER.log(Level.FINEST, " FE=> SSLRequest");
+
+ int sslTimeout = PGProperty.SSL_RESPONSE_TIMEOUT.getInt(info);
+ int currentTimeout = pgStream.getNetworkTimeout();
+
+ // if the current timeout is less than sslTimeout then
+ // use the smaller timeout. We could do something tricky
+ // here to not set it in that case but this is pretty readable
+ if (currentTimeout > 0 && currentTimeout < sslTimeout) {
+ sslTimeout = currentTimeout;
+ }
+
+ pgStream.setNetworkTimeout(sslTimeout);
+ // Send SSL request packet
+ pgStream.sendInteger4(8);
+ pgStream.sendInteger2(1234);
+ pgStream.sendInteger2(5679);
+ pgStream.flush();
+
+ // Now get the response from the backend, one of N, E, S.
+ int beresp = pgStream.receiveChar();
+ pgStream.setNetworkTimeout(currentTimeout);
switch (beresp) {
- case 'E':
- // An error occurred, so pass the error message to the
- // user.
- //
- // The most common one to be thrown here is:
- // "User authentication failed"
- //
- int elen = pgStream.receiveInteger4();
+ case 'E':
+ LOGGER.log(Level.FINEST, " <=BE SSLError");
- ServerErrorMessage errorMsg =
- new ServerErrorMessage(pgStream.receiveErrorString(elen - 4));
- LOGGER.log(Level.FINEST, " <=BE ErrorMessage({0})", errorMsg);
- throw new PSQLException(errorMsg, PGProperty.LOG_SERVER_ERROR_DETAIL.getBoolean(info));
-
- case 'R':
- // Authentication request.
- // Get the message length
- int msgLen = pgStream.receiveInteger4();
-
- // Get the type of request
- int areq = pgStream.receiveInteger4();
-
- // Process the request.
- switch (areq) {
- case AUTH_REQ_MD5: {
- byte[] md5Salt = pgStream.receive(4);
- if (LOGGER.isLoggable(Level.FINEST)) {
- LOGGER.log(Level.FINEST, " <=BE AuthenticationReqMD5(salt={0})", Utils.toHexString(md5Salt));
+ // Server doesn't even know about the SSL handshake protocol
+ if (sslMode.requireEncryption()) {
+ throw new PSQLException(GT.tr("The server does not support SSL."),
+ PSQLState.CONNECTION_REJECTED);
}
- byte[] digest = AuthenticationPluginManager.withEncodedPassword(
- AuthenticationRequestType.MD5_PASSWORD, info,
- encodedPassword -> MD5Digest.encode(user.getBytes(StandardCharsets.UTF_8),
- encodedPassword, md5Salt)
- );
+ // We have to reconnect to continue.
+ return new PGStream(pgStream, connectTimeout);
- if (LOGGER.isLoggable(Level.FINEST)) {
- LOGGER.log(Level.FINEST, " FE=> Password(md5digest={0})", new String(digest, StandardCharsets.US_ASCII));
+ case 'N':
+ LOGGER.log(Level.FINEST, " <=BE SSLRefused");
+
+ // Server does not support ssl
+ if (sslMode.requireEncryption()) {
+ throw new PSQLException(GT.tr("The server does not support SSL."),
+ PSQLState.CONNECTION_REJECTED);
}
- try {
- pgStream.sendChar('p');
- pgStream.sendInteger4(4 + digest.length + 1);
- pgStream.send(digest);
- } finally {
- Arrays.fill(digest, (byte) 0);
- }
- pgStream.sendChar(0);
- pgStream.flush();
+ return pgStream;
- break;
- }
+ case 'S':
+ LOGGER.log(Level.FINEST, " <=BE SSLOk");
- case AUTH_REQ_PASSWORD: {
- LOGGER.log(Level.FINEST, "<=BE AuthenticationReqPassword");
- LOGGER.log(Level.FINEST, " FE=> Password(password=)");
+ // Server supports ssl
+ MakeSSL.convert(pgStream, info);
+ return pgStream;
- AuthenticationPluginManager.withEncodedPassword(AuthenticationRequestType.CLEARTEXT_PASSWORD, info, encodedPassword -> {
- pgStream.sendChar('p');
- pgStream.sendInteger4(4 + encodedPassword.length + 1);
- pgStream.send(encodedPassword);
- return void.class;
- });
- pgStream.sendChar(0);
- pgStream.flush();
-
- break;
- }
-
- case AUTH_REQ_GSS:
- /*
- * Use GSSAPI if requested on all platforms, via JSSE.
- *
- * Note that this is slightly different to libpq, which uses SSPI for GSSAPI where
- * supported. We prefer to use the existing Java JSSE Kerberos support rather than
- * going to native (via JNA) calls where possible, so that JSSE system properties
- * etc continue to work normally.
- *
- * Note that while SSPI is often Kerberos-based there's no guarantee it will be; it
- * may be NTLM or anything else. If the client responds to an SSPI request via
- * GSSAPI and the other end isn't using Kerberos for SSPI then authentication will
- * fail.
- */
- final String gsslib = PGProperty.GSS_LIB.getOrDefault(info);
- final boolean usespnego = PGProperty.USE_SPNEGO.getBoolean(info);
-
- /*
- * Use gssapi. If the user has specified a Kerberos server
- * name we'll always use JSSE GSSAPI.
- */
- if ("gssapi".equals(gsslib)) {
- LOGGER.log(Level.FINE, "Using JSSE GSSAPI, param gsslib=gssapi");
- }
-
- /* Use JGSS's GSSAPI for this request */
- AuthenticationPluginManager.withPassword(AuthenticationRequestType.GSS, info, password -> {
- MakeGSS.authenticate(false, pgStream, host, user, password,
- PGProperty.JAAS_APPLICATION_NAME.getOrDefault(info),
- PGProperty.KERBEROS_SERVER_NAME.getOrDefault(info), usespnego,
- PGProperty.JAAS_LOGIN.getBoolean(info),
- PGProperty.LOG_SERVER_ERROR_DETAIL.getBoolean(info));
- return void.class;
- });
- break;
-
- case AUTH_REQ_GSS_CONTINUE:
- // unused
- break;
-
- case AUTH_REQ_SASL:
-
- LOGGER.log(Level.FINEST, " <=BE AuthenticationSASL");
-
- scramAuthenticator = AuthenticationPluginManager.withPassword(AuthenticationRequestType.SASL, info, password -> {
- if (password == null) {
- throw new PSQLException(
- GT.tr(
- "The server requested SCRAM-based authentication, but no password was provided."),
- PSQLState.CONNECTION_REJECTED);
- }
- if (password.length == 0) {
- throw new PSQLException(
- GT.tr(
- "The server requested SCRAM-based authentication, but the password is an empty string."),
- PSQLState.CONNECTION_REJECTED);
- }
- return new ScramAuthenticator(user, String.valueOf(password), pgStream);
- });
- scramAuthenticator.processServerMechanismsAndInit();
- scramAuthenticator.sendScramClientFirstMessage();
- // This works as follows:
- // 1. When tests is run from IDE, it is assumed SCRAM library is on the classpath
- // 2. In regular build for Java < 8 this `if` is deactivated and the code always throws
- if (false) {
- throw new PSQLException(GT.tr(
- "SCRAM authentication is not supported by this driver. You need JDK >= 8 and pgjdbc >= 42.2.0 (not \".jre\" versions)",
- areq), PSQLState.CONNECTION_REJECTED);
- }
- break;
-
- case AUTH_REQ_SASL_CONTINUE:
- scramAuthenticator.processServerFirstMessage(msgLen - 4 - 4);
- break;
-
- case AUTH_REQ_SASL_FINAL:
- scramAuthenticator.verifyServerSignature(msgLen - 4 - 4);
- break;
-
- case AUTH_REQ_OK:
- /* Cleanup after successful authentication */
- LOGGER.log(Level.FINEST, " <=BE AuthenticationOk");
- break authloop; // We're done.
-
- default:
- LOGGER.log(Level.FINEST, " <=BE AuthenticationReq (unsupported type {0})", areq);
- throw new PSQLException(GT.tr(
- "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver.",
- areq), PSQLState.CONNECTION_REJECTED);
- }
-
- break;
-
- default:
- throw new PSQLException(GT.tr("Protocol error. Session setup failed."),
- PSQLState.PROTOCOL_VIOLATION);
+ default:
+ throw new PSQLException(GT.tr("An error occurred while setting up the SSL connection."),
+ PSQLState.PROTOCOL_VIOLATION);
}
- }
- }
-
- @SuppressWarnings("deprecation")
- private void runInitialQueries(QueryExecutor queryExecutor, Properties info)
- throws SQLException {
- String assumeMinServerVersion = PGProperty.ASSUME_MIN_SERVER_VERSION.getOrDefault(info);
- if (Utils.parseServerVersionStr(assumeMinServerVersion) >= ServerVersion.v9_0.getVersionNum()) {
- // We already sent the parameter values in the StartupMessage so skip this
- return;
}
- final int dbVersion = queryExecutor.getServerVersionNum();
+ private void sendStartupPacket(PGStream pgStream, List params)
+ throws IOException {
+ if (LOGGER.isLoggable(Level.FINEST)) {
+ StringBuilder details = new StringBuilder();
+ for (int i = 0; i < params.size(); i++) {
+ if (i != 0) {
+ details.append(", ");
+ }
+ details.append(params.get(i).toString());
+ }
+ LOGGER.log(Level.FINEST, " FE=> StartupPacket({0})", details);
+ }
- if (PGProperty.GROUP_STARTUP_PARAMETERS.getBoolean(info) && dbVersion >= ServerVersion.v9_0.getVersionNum()) {
- SetupQueryRunner.run(queryExecutor, "BEGIN", false);
+ // Precalculate message length and encode params.
+ int length = 4 + 4;
+ byte[][] encodedParams = new byte[params.size() * 2][];
+ for (int i = 0; i < params.size(); i++) {
+ encodedParams[i * 2] = params.get(i).getEncodedKey();
+ encodedParams[i * 2 + 1] = params.get(i).getEncodedValue();
+ length += encodedParams[i * 2].length + 1 + encodedParams[i * 2 + 1].length + 1;
+ }
+
+ length += 1; // Terminating \0
+
+ // Send the startup message.
+ pgStream.sendInteger4(length);
+ pgStream.sendInteger2(3); // protocol major
+ pgStream.sendInteger2(0); // protocol minor
+ for (byte[] encodedParam : encodedParams) {
+ pgStream.send(encodedParam);
+ pgStream.sendChar(0);
+ }
+
+ pgStream.sendChar(0);
+ pgStream.flush();
}
- if (dbVersion >= ServerVersion.v9_0.getVersionNum()) {
- SetupQueryRunner.run(queryExecutor, "SET extra_float_digits = 3", false);
+ private void doAuthentication(PGStream pgStream, String host, String user, Properties info) throws IOException, SQLException {
+ // Now get the response from the backend, either an error message
+ // or an authentication request
+
+ /* SCRAM authentication state, if used */
+ ScramAuthenticator scramAuthenticator = null;
+
+ authloop:
+ while (true) {
+ int beresp = pgStream.receiveChar();
+
+ switch (beresp) {
+ case 'E':
+ // An error occurred, so pass the error message to the
+ // user.
+ //
+ // The most common one to be thrown here is:
+ // "User authentication failed"
+ //
+ int elen = pgStream.receiveInteger4();
+
+ ServerErrorMessage errorMsg =
+ new ServerErrorMessage(pgStream.receiveErrorString(elen - 4));
+ LOGGER.log(Level.FINEST, " <=BE ErrorMessage({0})", errorMsg);
+ throw new PSQLException(errorMsg, PGProperty.LOG_SERVER_ERROR_DETAIL.getBoolean(info));
+
+ case 'R':
+ // Authentication request.
+ // Get the message length
+ int msgLen = pgStream.receiveInteger4();
+
+ // Get the type of request
+ int areq = pgStream.receiveInteger4();
+
+ // Process the request.
+ switch (areq) {
+ case AUTH_REQ_MD5: {
+ byte[] md5Salt = pgStream.receive(4);
+ if (LOGGER.isLoggable(Level.FINEST)) {
+ LOGGER.log(Level.FINEST, " <=BE AuthenticationReqMD5(salt={0})", Utils.toHexString(md5Salt));
+ }
+
+ byte[] digest = AuthenticationPluginManager.withEncodedPassword(
+ AuthenticationRequestType.MD5_PASSWORD, info,
+ encodedPassword -> MD5Digest.encode(user.getBytes(StandardCharsets.UTF_8),
+ encodedPassword, md5Salt)
+ );
+
+ if (LOGGER.isLoggable(Level.FINEST)) {
+ LOGGER.log(Level.FINEST, " FE=> Password(md5digest={0})", new String(digest, StandardCharsets.US_ASCII));
+ }
+
+ try {
+ pgStream.sendChar('p');
+ pgStream.sendInteger4(4 + digest.length + 1);
+ pgStream.send(digest);
+ } finally {
+ Arrays.fill(digest, (byte) 0);
+ }
+ pgStream.sendChar(0);
+ pgStream.flush();
+
+ break;
+ }
+
+ case AUTH_REQ_PASSWORD: {
+ LOGGER.log(Level.FINEST, "<=BE AuthenticationReqPassword");
+ LOGGER.log(Level.FINEST, " FE=> Password(password=)");
+
+ AuthenticationPluginManager.withEncodedPassword(AuthenticationRequestType.CLEARTEXT_PASSWORD, info, encodedPassword -> {
+ pgStream.sendChar('p');
+ pgStream.sendInteger4(4 + encodedPassword.length + 1);
+ pgStream.send(encodedPassword);
+ return void.class;
+ });
+ pgStream.sendChar(0);
+ pgStream.flush();
+
+ break;
+ }
+
+ case AUTH_REQ_GSS:
+ /*
+ * Use GSSAPI if requested on all platforms, via JSSE.
+ *
+ * Note that this is slightly different to libpq, which uses SSPI for GSSAPI where
+ * supported. We prefer to use the existing Java JSSE Kerberos support rather than
+ * going to native (via JNA) calls where possible, so that JSSE system properties
+ * etc continue to work normally.
+ *
+ * Note that while SSPI is often Kerberos-based there's no guarantee it will be; it
+ * may be NTLM or anything else. If the client responds to an SSPI request via
+ * GSSAPI and the other end isn't using Kerberos for SSPI then authentication will
+ * fail.
+ */
+ final String gsslib = PGProperty.GSS_LIB.getOrDefault(info);
+ final boolean usespnego = PGProperty.USE_SPNEGO.getBoolean(info);
+
+ /*
+ * Use gssapi. If the user has specified a Kerberos server
+ * name we'll always use JSSE GSSAPI.
+ */
+ if ("gssapi".equals(gsslib)) {
+ LOGGER.log(Level.FINE, "Using JSSE GSSAPI, param gsslib=gssapi");
+ }
+
+ /* Use JGSS's GSSAPI for this request */
+ AuthenticationPluginManager.withPassword(AuthenticationRequestType.GSS, info, password -> {
+ MakeGSS.authenticate(false, pgStream, host, user, password,
+ PGProperty.JAAS_APPLICATION_NAME.getOrDefault(info),
+ PGProperty.KERBEROS_SERVER_NAME.getOrDefault(info), usespnego,
+ PGProperty.JAAS_LOGIN.getBoolean(info),
+ PGProperty.LOG_SERVER_ERROR_DETAIL.getBoolean(info));
+ return void.class;
+ });
+ break;
+
+ case AUTH_REQ_GSS_CONTINUE:
+ // unused
+ break;
+
+ case AUTH_REQ_SASL:
+
+ LOGGER.log(Level.FINEST, " <=BE AuthenticationSASL");
+
+ scramAuthenticator = AuthenticationPluginManager.withPassword(AuthenticationRequestType.SASL, info, password -> {
+ if (password == null) {
+ throw new PSQLException(
+ GT.tr(
+ "The server requested SCRAM-based authentication, but no password was provided."),
+ PSQLState.CONNECTION_REJECTED);
+ }
+ if (password.length == 0) {
+ throw new PSQLException(
+ GT.tr(
+ "The server requested SCRAM-based authentication, but the password is an empty string."),
+ PSQLState.CONNECTION_REJECTED);
+ }
+ return new ScramAuthenticator(user, String.valueOf(password), pgStream);
+ });
+ scramAuthenticator.processServerMechanismsAndInit();
+ scramAuthenticator.sendScramClientFirstMessage();
+ // This works as follows:
+                  // 1. When the test is run from an IDE, it is assumed the SCRAM library is on the classpath
+ // 2. In regular build for Java < 8 this `if` is deactivated and the code always throws
+ if (false) {
+ throw new PSQLException(GT.tr(
+ "SCRAM authentication is not supported by this driver. You need JDK >= 8 and pgjdbc >= 42.2.0 (not \".jre\" versions)",
+ areq), PSQLState.CONNECTION_REJECTED);
+ }
+ break;
+
+ case AUTH_REQ_SASL_CONTINUE:
+ scramAuthenticator.processServerFirstMessage(msgLen - 4 - 4);
+ break;
+
+ case AUTH_REQ_SASL_FINAL:
+ scramAuthenticator.verifyServerSignature(msgLen - 4 - 4);
+ break;
+
+ case AUTH_REQ_OK:
+ /* Cleanup after successful authentication */
+ LOGGER.log(Level.FINEST, " <=BE AuthenticationOk");
+ break authloop; // We're done.
+
+ default:
+ LOGGER.log(Level.FINEST, " <=BE AuthenticationReq (unsupported type {0})", areq);
+ throw new PSQLException(GT.tr(
+ "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver.",
+ areq), PSQLState.CONNECTION_REJECTED);
+ }
+
+ break;
+
+ default:
+ throw new PSQLException(GT.tr("Protocol error. Session setup failed."),
+ PSQLState.PROTOCOL_VIOLATION);
+ }
+ }
}
- String appName = PGProperty.APPLICATION_NAME.getOrDefault(info);
- if (appName != null && dbVersion >= ServerVersion.v9_0.getVersionNum()) {
- StringBuilder sql = new StringBuilder();
- sql.append("SET application_name = '");
- Utils.escapeLiteral(sql, appName, queryExecutor.getStandardConformingStrings());
- sql.append("'");
- SetupQueryRunner.run(queryExecutor, sql.toString(), false);
+ @SuppressWarnings("deprecation")
+ private void runInitialQueries(QueryExecutor queryExecutor, Properties info)
+ throws SQLException {
+ String assumeMinServerVersion = PGProperty.ASSUME_MIN_SERVER_VERSION.getOrDefault(info);
+ if (Utils.parseServerVersionStr(assumeMinServerVersion) >= ServerVersion.v9_0.getVersionNum()) {
+ // We already sent the parameter values in the StartupMessage so skip this
+ return;
+ }
+
+ final int dbVersion = queryExecutor.getServerVersionNum();
+
+ if (PGProperty.GROUP_STARTUP_PARAMETERS.getBoolean(info) && dbVersion >= ServerVersion.v9_0.getVersionNum()) {
+ SetupQueryRunner.run(queryExecutor, "BEGIN", false);
+ }
+
+ if (dbVersion >= ServerVersion.v9_0.getVersionNum()) {
+ SetupQueryRunner.run(queryExecutor, "SET extra_float_digits = 3", false);
+ }
+
+ String appName = PGProperty.APPLICATION_NAME.getOrDefault(info);
+ if (appName != null && dbVersion >= ServerVersion.v9_0.getVersionNum()) {
+ StringBuilder sql = new StringBuilder();
+ sql.append("SET application_name = '");
+ Utils.escapeLiteral(sql, appName, queryExecutor.getStandardConformingStrings());
+ sql.append("'");
+ SetupQueryRunner.run(queryExecutor, sql.toString(), false);
+ }
+
+ if (PGProperty.GROUP_STARTUP_PARAMETERS.getBoolean(info) && dbVersion >= ServerVersion.v9_0.getVersionNum()) {
+ SetupQueryRunner.run(queryExecutor, "COMMIT", false);
+ }
}
- if (PGProperty.GROUP_STARTUP_PARAMETERS.getBoolean(info) && dbVersion >= ServerVersion.v9_0.getVersionNum()) {
- SetupQueryRunner.run(queryExecutor, "COMMIT", false);
+ /**
+ * Since PG14 there is GUC_REPORT ParamStatus {@code in_hot_standby} which is set to "on"
+ * when the server is in archive recovery or standby mode. In driver's lingo such server is called
+ * {@link org.postgresql.hostchooser.HostRequirement#secondary}.
+ * Previously {@code transaction_read_only} was used as a workable substitute.
+ * However {@code transaction_read_only} could have been manually overridden on the primary server
+ * by database user leading to a false positives: ie server is effectively read-only but
+ * technically is "primary" (not in a recovery/standby mode).
+ *
+     *
+     * <p>This method checks whether the {@code in_hot_standby} GUC was reported by the server
+     * during initial connection:</p>
+     *
+     * <ul>
+     * <li>If {@code in_hot_standby} was reported and the value was "on" then the server is a replica
+     * and the database is read-only by definition, false is returned.</li>
+     * <li>If {@code in_hot_standby} was reported and the value was "off"
+     * then the server is indeed primary but the database may be in
+     * read-only mode nevertheless. We proceed to conservatively {@code show transaction_read_only}
+     * since users may not be expecting a readonly connection for {@code targetServerType=primary}.</li>
+     * <li>If {@code in_hot_standby} has not been reported we fall back to pre-v14 behavior.</li>
+     * </ul>
+     *
+     * <p>Do not confuse the {@code hot_standby} and {@code in_hot_standby} ParamStatuses.</p>
+     *
+ * @see GUC_REPORT documentation
+ * @see Hot standby documentation
+ * @see in_hot_standby patch thread v10
+ * @see in_hot_standby patch thread v14
+ */
+ private boolean isPrimary(QueryExecutor queryExecutor) throws SQLException, IOException {
+ String inHotStandby = queryExecutor.getParameterStatus(IN_HOT_STANDBY);
+ if ("on".equalsIgnoreCase(inHotStandby)) {
+ return false;
+ }
+ Tuple results = SetupQueryRunner.run(queryExecutor, "show transaction_read_only", true);
+ Tuple nonNullResults = results;
+ String queriedTransactionReadonly = queryExecutor.getEncoding().decode(nonNullResults.get(0));
+ return "off".equalsIgnoreCase(queriedTransactionReadonly);
}
- }
- /**
- * Since PG14 there is GUC_REPORT ParamStatus {@code in_hot_standby} which is set to "on"
- * when the server is in archive recovery or standby mode. In driver's lingo such server is called
- * {@link org.postgresql.hostchooser.HostRequirement#secondary}.
- * Previously {@code transaction_read_only} was used as a workable substitute.
- * However {@code transaction_read_only} could have been manually overridden on the primary server
- * by database user leading to a false positives: ie server is effectively read-only but
- * technically is "primary" (not in a recovery/standby mode).
- *
- *
This method checks whether {@code in_hot_standby} GUC was reported by the server
- * during initial connection:
- *
- *
- *
{@code in_hot_standby} was reported and the value was "on" then the server is a replica
- * and database is read-only by definition, false is returned.
- *
{@code in_hot_standby} was reported and the value was "off"
- * then the server is indeed primary but database may be in
- * read-only mode nevertheless. We proceed to conservatively {@code show transaction_read_only}
- * since users may not be expecting a readonly connection for {@code targetServerType=primary}
- *
If {@code in_hot_standby} has not been reported we fallback to pre v14 behavior.
- *
- *
- *
Do not confuse {@code hot_standby} and {@code in_hot_standby} ParamStatuses
* CopyManager.copyIn() ->QueryExecutor.startCopy() - sends given query to server
* ->processCopyResults(): - receives CopyInResponse from Server - creates new CopyInImpl
* ->initCopy(): - receives copy metadata from server ->CopyInImpl.init() ->lock()
@@ -34,32 +33,32 @@ import java.sql.SQLException;
*/
public class CopyInImpl extends CopyOperationImpl implements CopyIn {
- public CopyInImpl() {
- }
+ public CopyInImpl() {
+ }
- @Override
- public void writeToCopy(byte[] data, int off, int siz) throws SQLException {
- getQueryExecutor().writeToCopy(this, data, off, siz);
- }
+ @Override
+ public void writeToCopy(byte[] data, int off, int siz) throws SQLException {
+ getQueryExecutor().writeToCopy(this, data, off, siz);
+ }
- @Override
- public void writeToCopy(ByteStreamWriter from) throws SQLException {
- getQueryExecutor().writeToCopy(this, from);
- }
+ @Override
+ public void writeToCopy(ByteStreamWriter from) throws SQLException {
+ getQueryExecutor().writeToCopy(this, from);
+ }
- @Override
- public void flushCopy() throws SQLException {
- getQueryExecutor().flushCopy(this);
- }
+ @Override
+ public void flushCopy() throws SQLException {
+ getQueryExecutor().flushCopy(this);
+ }
- @Override
- public long endCopy() throws SQLException {
- return getQueryExecutor().endCopy(this);
- }
+ @Override
+ public long endCopy() throws SQLException {
+ return getQueryExecutor().endCopy(this);
+ }
- @Override
- protected void handleCopydata(byte[] data) throws PSQLException {
- throw new PSQLException(GT.tr("CopyIn copy direction can't receive data"),
- PSQLState.PROTOCOL_VIOLATION);
- }
+ @Override
+ protected void handleCopydata(byte[] data) throws PSQLException {
+ throw new PSQLException(GT.tr("CopyIn copy direction can't receive data"),
+ PSQLState.PROTOCOL_VIOLATION);
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/CopyOperationImpl.java b/pgjdbc/src/main/java/org/postgresql/core/v3/CopyOperationImpl.java
index 680c6d2..cd4c2ed 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/CopyOperationImpl.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/CopyOperationImpl.java
@@ -5,77 +5,76 @@
package org.postgresql.core.v3;
+import java.sql.SQLException;
import org.postgresql.copy.CopyOperation;
import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
-import java.sql.SQLException;
-
public abstract class CopyOperationImpl implements CopyOperation {
- QueryExecutorImpl queryExecutor;
- int rowFormat;
- int [] fieldFormats;
- long handledRowCount = -1;
+ QueryExecutorImpl queryExecutor;
+ int rowFormat;
+ int[] fieldFormats;
+ long handledRowCount = -1;
- public CopyOperationImpl() {
- }
-
- void init(QueryExecutorImpl q, int fmt, int[] fmts) {
- queryExecutor = q;
- rowFormat = fmt;
- fieldFormats = fmts;
- }
-
- protected QueryExecutorImpl getQueryExecutor() {
- return queryExecutor;
- }
-
- @Override
- public void cancelCopy() throws SQLException {
- queryExecutor.cancelCopy(this);
- }
-
- @Override
- public int getFieldCount() {
- return fieldFormats.length;
- }
-
- @Override
- public int getFieldFormat(int field) {
- return fieldFormats[field];
- }
-
- @Override
- public int getFormat() {
- return rowFormat;
- }
-
- @Override
- public boolean isActive() {
- return queryExecutor.hasLockOn(this);
- }
-
- public void handleCommandStatus(String status) throws PSQLException {
- if (status.startsWith("COPY")) {
- int i = status.lastIndexOf(' ');
- handledRowCount = i > 3 ? Long.parseLong(status.substring(i + 1)) : -1;
- } else {
- throw new PSQLException(GT.tr("CommandComplete expected COPY but got: " + status),
- PSQLState.COMMUNICATION_ERROR);
+ public CopyOperationImpl() {
}
- }
- /**
- * Consume received copy data.
- *
- * @param data data that was receive by copy protocol
- * @throws PSQLException if some internal problem occurs
- */
- protected abstract void handleCopydata(byte[] data) throws PSQLException;
+ void init(QueryExecutorImpl q, int fmt, int[] fmts) {
+ queryExecutor = q;
+ rowFormat = fmt;
+ fieldFormats = fmts;
+ }
- @Override
- public long getHandledRowCount() {
- return handledRowCount;
- }
+ protected QueryExecutorImpl getQueryExecutor() {
+ return queryExecutor;
+ }
+
+ @Override
+ public void cancelCopy() throws SQLException {
+ queryExecutor.cancelCopy(this);
+ }
+
+ @Override
+ public int getFieldCount() {
+ return fieldFormats.length;
+ }
+
+ @Override
+ public int getFieldFormat(int field) {
+ return fieldFormats[field];
+ }
+
+ @Override
+ public int getFormat() {
+ return rowFormat;
+ }
+
+ @Override
+ public boolean isActive() {
+ return queryExecutor.hasLockOn(this);
+ }
+
+ public void handleCommandStatus(String status) throws PSQLException {
+ if (status.startsWith("COPY")) {
+ int i = status.lastIndexOf(' ');
+ handledRowCount = i > 3 ? Long.parseLong(status.substring(i + 1)) : -1;
+ } else {
+ throw new PSQLException(GT.tr("CommandComplete expected COPY but got: " + status),
+ PSQLState.COMMUNICATION_ERROR);
+ }
+ }
+
+ /**
+ * Consume received copy data.
+ *
+ * @param data data that was receive by copy protocol
+ * @throws PSQLException if some internal problem occurs
+ */
+ protected abstract void handleCopydata(byte[] data) throws PSQLException;
+
+ @Override
+ public long getHandledRowCount() {
+ return handledRowCount;
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/CopyOutImpl.java b/pgjdbc/src/main/java/org/postgresql/core/v3/CopyOutImpl.java
index f7898bf..d0faa4f 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/CopyOutImpl.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/CopyOutImpl.java
@@ -5,9 +5,8 @@
package org.postgresql.core.v3;
-import org.postgresql.copy.CopyOut;
-
import java.sql.SQLException;
+import org.postgresql.copy.CopyOut;
/**
*
Anticipated flow of a COPY TO STDOUT operation:
@@ -24,25 +23,25 @@ import java.sql.SQLException;
* <-returned: byte array of data received from server or null at end.
*/
public class CopyOutImpl extends CopyOperationImpl implements CopyOut {
- private byte [] currentDataRow;
+ private byte[] currentDataRow;
- public CopyOutImpl() {
- }
+ public CopyOutImpl() {
+ }
- @Override
- public byte [] readFromCopy() throws SQLException {
- return readFromCopy(true);
- }
+ @Override
+ public byte[] readFromCopy() throws SQLException {
+ return readFromCopy(true);
+ }
- @Override
- public byte [] readFromCopy(boolean block) throws SQLException {
- currentDataRow = null;
- getQueryExecutor().readFromCopy(this, block);
- return currentDataRow;
- }
+ @Override
+ public byte[] readFromCopy(boolean block) throws SQLException {
+ currentDataRow = null;
+ getQueryExecutor().readFromCopy(this, block);
+ return currentDataRow;
+ }
- @Override
- protected void handleCopydata(byte[] data) {
- currentDataRow = data;
- }
+ @Override
+ protected void handleCopydata(byte[] data) {
+ currentDataRow = data;
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/DescribeRequest.java b/pgjdbc/src/main/java/org/postgresql/core/v3/DescribeRequest.java
index 14a0fef..3853dcc 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/DescribeRequest.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/DescribeRequest.java
@@ -7,19 +7,18 @@ package org.postgresql.core.v3;
/**
* Information for "pending describe queue".
- *
*/
class DescribeRequest {
- public final SimpleQuery query;
- public final SimpleParameterList parameterList;
- public final boolean describeOnly;
- public final String statementName;
+ public final SimpleQuery query;
+ public final SimpleParameterList parameterList;
+ public final boolean describeOnly;
+ public final String statementName;
- DescribeRequest(SimpleQuery query, SimpleParameterList parameterList,
- boolean describeOnly, String statementName) {
- this.query = query;
- this.parameterList = parameterList;
- this.describeOnly = describeOnly;
- this.statementName = statementName;
- }
+ DescribeRequest(SimpleQuery query, SimpleParameterList parameterList,
+ boolean describeOnly, String statementName) {
+ this.query = query;
+ this.parameterList = parameterList;
+ this.describeOnly = describeOnly;
+ this.statementName = statementName;
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/ExecuteRequest.java b/pgjdbc/src/main/java/org/postgresql/core/v3/ExecuteRequest.java
index e01190b..2d58827 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/ExecuteRequest.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/ExecuteRequest.java
@@ -7,16 +7,15 @@ package org.postgresql.core.v3;
/**
* Information for "pending execute queue".
- *
*/
class ExecuteRequest {
- public final SimpleQuery query;
- public final Portal portal;
- public final boolean asSimple;
+ public final SimpleQuery query;
+ public final Portal portal;
+ public final boolean asSimple;
- ExecuteRequest(SimpleQuery query, Portal portal, boolean asSimple) {
- this.query = query;
- this.portal = portal;
- this.asSimple = asSimple;
- }
+ ExecuteRequest(SimpleQuery query, Portal portal, boolean asSimple) {
+ this.query = query;
+ this.portal = portal;
+ this.asSimple = asSimple;
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/Portal.java b/pgjdbc/src/main/java/org/postgresql/core/v3/Portal.java
index 1300355..bc30745 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/Portal.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/Portal.java
@@ -6,10 +6,9 @@
package org.postgresql.core.v3;
-import org.postgresql.core.ResultCursor;
-
import java.lang.ref.PhantomReference;
import java.nio.charset.StandardCharsets;
+import org.postgresql.core.ResultCursor;
/**
* V3 ResultCursor implementation in terms of backend Portals. This holds the state of a single
@@ -18,51 +17,51 @@ import java.nio.charset.StandardCharsets;
* @author Oliver Jowett (oliver@opencloud.com)
*/
class Portal implements ResultCursor {
- Portal(SimpleQuery query, String portalName) {
- this.query = query;
- this.portalName = portalName;
- this.encodedName = portalName.getBytes(StandardCharsets.UTF_8);
- }
+ private final SimpleQuery query;
+ private final String portalName;
+ private final byte[] encodedName;
+ private PhantomReference> cleanupRef;
- @Override
- public void close() {
- PhantomReference> cleanupRef = this.cleanupRef;
- if (cleanupRef != null) {
- cleanupRef.clear();
- cleanupRef.enqueue();
- this.cleanupRef = null;
+ Portal(SimpleQuery query, String portalName) {
+ this.query = query;
+ this.portalName = portalName;
+ this.encodedName = portalName.getBytes(StandardCharsets.UTF_8);
}
- }
- String getPortalName() {
- return portalName;
- }
+ @Override
+ public void close() {
+ PhantomReference> cleanupRef = this.cleanupRef;
+ if (cleanupRef != null) {
+ cleanupRef.clear();
+ cleanupRef.enqueue();
+ this.cleanupRef = null;
+ }
+ }
- byte[] getEncodedPortalName() {
- return encodedName;
- }
+ String getPortalName() {
+ return portalName;
+ }
- SimpleQuery getQuery() {
- return query;
- }
+ // Holding on to a reference to the generating query has
+ // the nice side-effect that while this Portal is referenced,
+ // so is the SimpleQuery, so the underlying statement won't
+ // be closed while the portal is open (the backend closes
+ // all open portals when the statement is closed)
- void setCleanupRef(PhantomReference> cleanupRef) {
- this.cleanupRef = cleanupRef;
- }
+ byte[] getEncodedPortalName() {
+ return encodedName;
+ }
- @Override
- public String toString() {
- return portalName;
- }
+ SimpleQuery getQuery() {
+ return query;
+ }
- // Holding on to a reference to the generating query has
- // the nice side-effect that while this Portal is referenced,
- // so is the SimpleQuery, so the underlying statement won't
- // be closed while the portal is open (the backend closes
- // all open portals when the statement is closed)
+ void setCleanupRef(PhantomReference> cleanupRef) {
+ this.cleanupRef = cleanupRef;
+ }
- private final SimpleQuery query;
- private final String portalName;
- private final byte[] encodedName;
- private PhantomReference> cleanupRef;
+ @Override
+ public String toString() {
+ return portalName;
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/QueryExecutorImpl.java b/pgjdbc/src/main/java/org/postgresql/core/v3/QueryExecutorImpl.java
index e1e12b6..f2b2f2c 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/QueryExecutorImpl.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/QueryExecutorImpl.java
@@ -6,6 +6,32 @@
package org.postgresql.core.v3;
+import java.io.IOException;
+import java.lang.ref.PhantomReference;
+import java.lang.ref.Reference;
+import java.lang.ref.ReferenceQueue;
+import java.net.Socket;
+import java.net.SocketException;
+import java.net.SocketTimeoutException;
+import java.nio.charset.StandardCharsets;
+import java.sql.SQLException;
+import java.sql.SQLWarning;
+import java.util.ArrayDeque;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Deque;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Locale;
+import java.util.Objects;
+import java.util.Properties;
+import java.util.Set;
+import java.util.TimeZone;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.logging.Level;
+import java.util.logging.Logger;
import org.postgresql.PGProperty;
import org.postgresql.copy.CopyIn;
import org.postgresql.copy.CopyOperation;
@@ -46,3057 +72,3006 @@ import org.postgresql.util.PSQLState;
import org.postgresql.util.PSQLWarning;
import org.postgresql.util.ServerErrorMessage;
-import java.io.IOException;
-import java.lang.ref.PhantomReference;
-import java.lang.ref.Reference;
-import java.lang.ref.ReferenceQueue;
-import java.net.Socket;
-import java.net.SocketException;
-import java.net.SocketTimeoutException;
-import java.nio.charset.StandardCharsets;
-import java.sql.SQLException;
-import java.sql.SQLWarning;
-import java.util.ArrayDeque;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Deque;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Locale;
-import java.util.Objects;
-import java.util.Properties;
-import java.util.Set;
-import java.util.TimeZone;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-
/**
* QueryExecutor implementation for the V3 protocol.
*/
@SuppressWarnings("try")
public class QueryExecutorImpl extends QueryExecutorBase {
- private static final Logger LOGGER = Logger.getLogger(QueryExecutorImpl.class.getName());
+ private static final Logger LOGGER = Logger.getLogger(QueryExecutorImpl.class.getName());
- private static final Field[] NO_FIELDS = new Field[0];
+ private static final Field[] NO_FIELDS = new Field[0];
+ // Deadlock avoidance:
+ //
+ // It's possible for the send and receive streams to get "deadlocked" against each other since
+ // we do not have a separate thread. The scenario is this: we have two streams:
+ //
+ // driver -> TCP buffering -> server
+ // server -> TCP buffering -> driver
+ //
+ // The server behaviour is roughly:
+ // while true:
+ // read message
+ // execute message
+ // write results
+ //
+ // If the server -> driver stream has a full buffer, the write will block.
+ // If the driver is still writing when this happens, and the driver -> server
+ // stream also fills up, we deadlock: the driver is blocked on write() waiting
+ // for the server to read some more data, and the server is blocked on write()
+ // waiting for the driver to read some more data.
+ //
+ // To avoid this, we guess at how much response data we can request from the
+ // server before the server -> driver stream's buffer is full (MAX_BUFFERED_RECV_BYTES).
+ // This is the point where the server blocks on write and stops reading data. If we
+ // reach this point, we force a Sync message and read pending data from the server
+ // until ReadyForQuery, then go back to writing more queries unless we saw an error.
+ //
+ // This is not 100% reliable -- it's only done in the batch-query case and only
+ // at a reasonably high level (per query, not per message), and it's only an estimate
+ // -- so it might break. To do it correctly in all cases would seem to require a
+ // separate send or receive thread as we can only do the Sync-and-read-results
+ // operation at particular points, and also as we don't really know how much data
+ // the server is sending.
+ //
+ // Our message size estimation is coarse, and disregards asynchronous
+ // notifications, warnings/info/debug messages, etc, so the response size may be
+ // quite different from the 250 bytes assumed here even for queries that don't
+ // return data.
+ //
+ // See github issue #194 and #195 .
+ //
+ // Assume 64k server->client buffering, which is extremely conservative. A typical
+ // system will have 200kb or more of buffers for its receive buffers, and the sending
+ // system will typically have the same on the send side, giving us 400kb or to work
+ // with. (We could check Java's receive buffer size, but prefer to assume a very
+ // conservative buffer instead, and we don't know how big the server's send
+ // buffer is.)
+ //
+ private static final int MAX_BUFFERED_RECV_BYTES = 64000;
+ private static final int NODATA_QUERY_RESPONSE_SIZE_BYTES = 250;
+ private static final Portal UNNAMED_PORTAL = new Portal(null, "unnamed");
- static {
- //canonicalize commonly seen strings to reduce memory and speed comparisons
- Encoding.canonicalize("application_name");
- Encoding.canonicalize("client_encoding");
- Encoding.canonicalize("DateStyle");
- Encoding.canonicalize("integer_datetimes");
- Encoding.canonicalize("off");
- Encoding.canonicalize("on");
- Encoding.canonicalize("server_encoding");
- Encoding.canonicalize("server_version");
- Encoding.canonicalize("server_version_num");
- Encoding.canonicalize("standard_conforming_strings");
- Encoding.canonicalize("TimeZone");
- Encoding.canonicalize("UTF8");
- Encoding.canonicalize("UTF-8");
- Encoding.canonicalize("in_hot_standby");
- }
-
- /**
- * TimeZone of the current connection (TimeZone backend parameter).
- */
- private TimeZone timeZone;
-
- /**
- * application_name connection property.
- */
- private String applicationName;
-
- /**
- * True if server uses integers for date and time fields. False if server uses double.
- */
- private boolean integerDateTimes;
-
- /**
- * Bit set that has a bit set for each oid which should be received using binary format.
- */
- private final Set useBinaryReceiveForOids = new HashSet<>();
-
- /**
- * Bit set that has a bit set for each oid which should be sent using binary format.
- */
- private final Set useBinarySendForOids = new HashSet<>();
-
- /**
- * This is a fake query object so processResults can distinguish "ReadyForQuery" messages
- * from Sync messages vs from simple execute (aka 'Q').
- */
- private final SimpleQuery sync;
-
- private short deallocateEpoch;
-
- /**
- * This caches the latest observed {@code set search_path} query so the reset of prepared
- * statement cache can be skipped if using repeated calls for the same {@code set search_path}
- * value.
- */
- private String lastSetSearchPathQuery;
-
- /**
- * The exception that caused the last transaction to fail.
- */
- private SQLException transactionFailCause;
-
- private final ReplicationProtocol replicationProtocol;
-
- /**
- * {@code CommandComplete(B)} messages are quite common, so we reuse instance to parse those
- */
- private final CommandCompleteParser commandCompleteParser = new CommandCompleteParser();
-
- private final AdaptiveFetchCache adaptiveFetchCache;
-
- @SuppressWarnings("this-escape")
- public QueryExecutorImpl(PGStream pgStream,
- int cancelSignalTimeout, Properties info) throws SQLException, IOException {
- super(pgStream, cancelSignalTimeout, info);
-
- this.sync = (SimpleQuery) createQuery("SYNC", false, true).query;
-
- long maxResultBuffer = pgStream.getMaxResultBuffer();
- this.adaptiveFetchCache = new AdaptiveFetchCache(maxResultBuffer, info);
-
- this.allowEncodingChanges = PGProperty.ALLOW_ENCODING_CHANGES.getBoolean(info);
- this.cleanupSavePoints = PGProperty.CLEANUP_SAVEPOINTS.getBoolean(info);
- // assignment, argument
- this.replicationProtocol = new V3ReplicationProtocol(this, pgStream);
- readStartupMessages();
- }
-
- @Override
- public int getProtocolVersion() {
- return 3;
- }
-
- /**
- *
Supplement to synchronization of public methods on current QueryExecutor.
- *
- *
Necessary for keeping the connection intact between calls to public methods sharing a state
- * such as COPY subprotocol. waitOnLock() must be called at beginning of each connection access
- * point.
- *
- *
Public methods sharing that state must then be synchronized among themselves. Normal method
- * synchronization typically suffices for that.
- *
- *
See notes on related methods as well as currentCopy() below.
- */
- private Object lockedFor;
-
- /**
- * Obtain lock over this connection for given object, blocking to wait if necessary.
- *
- * @param obtainer object that gets the lock. Normally current thread.
- * @throws PSQLException when already holding the lock or getting interrupted.
- */
- private void lock(Object obtainer) throws PSQLException {
- if (lockedFor == obtainer) {
- throw new PSQLException(GT.tr("Tried to obtain lock while already holding it"),
- PSQLState.OBJECT_NOT_IN_STATE);
-
- }
- waitOnLock();
- lockedFor = obtainer;
- }
-
- /**
- * Release lock on this connection presumably held by given object.
- *
- * @param holder object that holds the lock. Normally current thread.
- * @throws PSQLException when this thread does not hold the lock
- */
- private void unlock(Object holder) throws PSQLException {
- if (lockedFor != holder) {
- throw new PSQLException(GT.tr("Tried to break lock on database connection"),
- PSQLState.OBJECT_NOT_IN_STATE);
- }
- lockedFor = null;
- lockCondition.signal();
- }
-
- /**
- * Wait until our lock is released. Execution of a single synchronized method can then continue
- * without further ado. Must be called at beginning of each synchronized public method.
- */
- private void waitOnLock() throws PSQLException {
- while (lockedFor != null) {
- try {
- lockCondition.await();
- } catch (InterruptedException ie) {
- Thread.currentThread().interrupt();
- throw new PSQLException(
- GT.tr("Interrupted while waiting to obtain lock on database connection"),
- PSQLState.OBJECT_NOT_IN_STATE, ie);
- }
- }
- }
-
- /**
- * @param holder object assumed to hold the lock
- * @return whether given object actually holds the lock
- */
- boolean hasLockOn(Object holder) {
- try (ResourceLock ignore = lock.obtain()) {
- return lockedFor == holder;
- }
- }
-
- /**
- * @param holder object assumed to hold the lock
- * @return whether given object actually holds the lock
- */
- private boolean hasLock(Object holder) {
- return lockedFor == holder;
- }
-
- //
- // Query parsing
- //
-
- @Override
- public Query createSimpleQuery(String sql) throws SQLException {
- List queries = Parser.parseJdbcSql(sql,
- getStandardConformingStrings(), false, true,
- isReWriteBatchedInsertsEnabled(), getQuoteReturningIdentifiers());
- return wrap(queries);
- }
-
- @Override
- public Query wrap(List queries) {
- if (queries.isEmpty()) {
- // Empty query
- return emptyQuery;
- }
- if (queries.size() == 1) {
- NativeQuery firstQuery = queries.get(0);
- if (isReWriteBatchedInsertsEnabled()
- && firstQuery.getCommand().isBatchedReWriteCompatible()) {
- int valuesBraceOpenPosition =
- firstQuery.getCommand().getBatchRewriteValuesBraceOpenPosition();
- int valuesBraceClosePosition =
- firstQuery.getCommand().getBatchRewriteValuesBraceClosePosition();
- return new BatchedQuery(firstQuery, this, valuesBraceOpenPosition,
- valuesBraceClosePosition, isColumnSanitiserDisabled());
- } else {
- return new SimpleQuery(firstQuery, this, isColumnSanitiserDisabled());
- }
+ static {
+ //canonicalize commonly seen strings to reduce memory and speed comparisons
+ Encoding.canonicalize("application_name");
+ Encoding.canonicalize("client_encoding");
+ Encoding.canonicalize("DateStyle");
+ Encoding.canonicalize("integer_datetimes");
+ Encoding.canonicalize("off");
+ Encoding.canonicalize("on");
+ Encoding.canonicalize("server_encoding");
+ Encoding.canonicalize("server_version");
+ Encoding.canonicalize("server_version_num");
+ Encoding.canonicalize("standard_conforming_strings");
+ Encoding.canonicalize("TimeZone");
+ Encoding.canonicalize("UTF8");
+ Encoding.canonicalize("UTF-8");
+ Encoding.canonicalize("in_hot_standby");
}
- // Multiple statements.
- SimpleQuery[] subqueries = new SimpleQuery[queries.size()];
- int[] offsets = new int[subqueries.length];
- int offset = 0;
- for (int i = 0; i < queries.size(); i++) {
- NativeQuery nativeQuery = queries.get(i);
- offsets[i] = offset;
- subqueries[i] = new SimpleQuery(nativeQuery, this, isColumnSanitiserDisabled());
- offset += nativeQuery.bindPositions.length;
+ /**
+ * Bit set that has a bit set for each oid which should be received using binary format.
+ */
+ private final Set useBinaryReceiveForOids = new HashSet<>();
+
+ /**
+ * Bit set that has a bit set for each oid which should be sent using binary format.
+ */
+ private final Set useBinarySendForOids = new HashSet<>();
+
+ /**
+ * This is a fake query object so processResults can distinguish "ReadyForQuery" messages
+ * from Sync messages vs from simple execute (aka 'Q').
+ */
+ private final SimpleQuery sync;
+ private final ReplicationProtocol replicationProtocol;
+ /**
+ * {@code CommandComplete(B)} messages are quite common, so we reuse instance to parse those
+ */
+ private final CommandCompleteParser commandCompleteParser = new CommandCompleteParser();
+ private final AdaptiveFetchCache adaptiveFetchCache;
+ private final HashMap, String> parsedQueryMap =
+ new HashMap<>();
+ private final ReferenceQueue parsedQueryCleanupQueue =
+ new ReferenceQueue<>();
+ private final HashMap, String> openPortalMap =
+ new HashMap<>();
+ private final ReferenceQueue openPortalCleanupQueue = new ReferenceQueue<>();
+ private final Deque pendingParseQueue = new ArrayDeque<>();
+ private final Deque pendingBindQueue = new ArrayDeque<>();
+ private final Deque pendingExecuteQueue = new ArrayDeque<>();
+ private final Deque pendingDescribeStatementQueue =
+ new ArrayDeque<>();
+ private final Deque pendingDescribePortalQueue = new ArrayDeque<>();
+ private final boolean allowEncodingChanges;
+ private final boolean cleanupSavePoints;
+
+ //
+ // Query parsing
+ //
+ private final SimpleQuery beginTransactionQuery =
+ new SimpleQuery(
+ new NativeQuery("BEGIN", null, false, SqlCommand.BLANK),
+ null, false);
+ private final SimpleQuery beginReadOnlyTransactionQuery =
+ new SimpleQuery(
+ new NativeQuery("BEGIN READ ONLY", null, false, SqlCommand.BLANK),
+ null, false);
+
+ //
+ // Query execution
+ //
+ private final SimpleQuery emptyQuery =
+ new SimpleQuery(
+ new NativeQuery("", null, false,
+ SqlCommand.createStatementTypeInfo(SqlCommandType.BLANK)
+ ), null, false);
+ private final SimpleQuery autoSaveQuery =
+ new SimpleQuery(
+ new NativeQuery("SAVEPOINT PGJDBC_AUTOSAVE", null, false, SqlCommand.BLANK),
+ null, false);
+ private final SimpleQuery releaseAutoSave =
+ new SimpleQuery(
+ new NativeQuery("RELEASE SAVEPOINT PGJDBC_AUTOSAVE", null, false, SqlCommand.BLANK),
+ null, false);
+ /*
+ In autosave mode we use this query to roll back errored transactions
+ */
+ private final SimpleQuery restoreToAutoSave =
+ new SimpleQuery(
+ new NativeQuery("ROLLBACK TO SAVEPOINT PGJDBC_AUTOSAVE", null, false, SqlCommand.BLANK),
+ null, false);
+ AtomicBoolean processingCopyResults = new AtomicBoolean(false);
+ /**
+ * TimeZone of the current connection (TimeZone backend parameter).
+ */
+ private TimeZone timeZone;
+ /**
+ * application_name connection property.
+ */
+ private String applicationName;
+ /**
+ * True if server uses integers for date and time fields. False if server uses double.
+ */
+ private boolean integerDateTimes;
+ private short deallocateEpoch;
+ /**
+ * This caches the latest observed {@code set search_path} query so the reset of prepared
+ * statement cache can be skipped if using repeated calls for the same {@code set search_path}
+ * value.
+ */
+ private String lastSetSearchPathQuery;
+ /**
+ * The exception that caused the last transaction to fail.
+ */
+ private SQLException transactionFailCause;
+
+ //
+ // Fastpath
+ //
+ /**
+ *
Supplement to synchronization of public methods on current QueryExecutor.
+ *
+ *
Necessary for keeping the connection intact between calls to public methods sharing a state
+ * such as COPY subprotocol. waitOnLock() must be called at beginning of each connection access
+ * point.
+ *
+ *
Public methods sharing that state must then be synchronized among themselves. Normal method
+ * synchronization typically suffices for that.
+ *
+ *
See notes on related methods as well as currentCopy() below.
The estimated server response size since we last consumed the input stream from the server, in
+ * bytes.
+ *
+ *
Starts at zero, reset by every Sync message. Mainly used for batches.
+ *
+ *
Used to avoid deadlocks, see MAX_BUFFERED_RECV_BYTES.
+ */
+ private int estimatedReceiveBufferBytes;
+
+ @SuppressWarnings("this-escape")
+ public QueryExecutorImpl(PGStream pgStream,
+ int cancelSignalTimeout, Properties info) throws SQLException, IOException {
+ super(pgStream, cancelSignalTimeout, info);
+
+ this.sync = (SimpleQuery) createQuery("SYNC", false, true).query;
+
+ long maxResultBuffer = pgStream.getMaxResultBuffer();
+ this.adaptiveFetchCache = new AdaptiveFetchCache(maxResultBuffer, info);
+
+ this.allowEncodingChanges = PGProperty.ALLOW_ENCODING_CHANGES.getBoolean(info);
+ this.cleanupSavePoints = PGProperty.CLEANUP_SAVEPOINTS.getBoolean(info);
+ // assignment, argument
+ this.replicationProtocol = new V3ReplicationProtocol(this, pgStream);
+ readStartupMessages();
}
- return new CompositeQuery(subqueries, offsets);
- }
-
- //
- // Query execution
- //
-
- private int updateQueryMode(int flags) {
- switch (getPreferQueryMode()) {
- case SIMPLE:
- return flags | QUERY_EXECUTE_AS_SIMPLE;
- case EXTENDED:
- return flags & ~QUERY_EXECUTE_AS_SIMPLE;
- default:
- return flags;
+ @Override
+ public int getProtocolVersion() {
+ return 3;
}
- }
- @Override
- public void execute(Query query, ParameterList parameters,
- ResultHandler handler,
- int maxRows, int fetchSize, int flags) throws SQLException {
- execute(query, parameters, handler, maxRows, fetchSize, flags, false);
- }
+ /**
+ * Obtain lock over this connection for given object, blocking to wait if necessary.
+ *
+ * @param obtainer object that gets the lock. Normally current thread.
+ * @throws PSQLException when already holding the lock or getting interrupted.
+ */
+ private void lock(Object obtainer) throws PSQLException {
+ if (lockedFor == obtainer) {
+ throw new PSQLException(GT.tr("Tried to obtain lock while already holding it"),
+ PSQLState.OBJECT_NOT_IN_STATE);
- @Override
- public void execute(Query query, ParameterList parameters,
- ResultHandler handler,
- int maxRows, int fetchSize, int flags, boolean adaptiveFetch) throws SQLException {
- try (ResourceLock ignore = lock.obtain()) {
- waitOnLock();
- if (LOGGER.isLoggable(Level.FINEST)) {
- LOGGER.log(Level.FINEST, " simple execute, handler={0}, maxRows={1}, fetchSize={2}, flags={3}",
- new Object[]{handler, maxRows, fetchSize, flags});
- }
-
- if (parameters == null) {
- parameters = SimpleQuery.NO_PARAMETERS;
- }
-
- flags = updateQueryMode(flags);
-
- boolean describeOnly = (QUERY_DESCRIBE_ONLY & flags) != 0;
-
- ((V3ParameterList) parameters).convertFunctionOutParameters();
-
- // Check parameters are all set..
- if (!describeOnly) {
- ((V3ParameterList) parameters).checkAllParametersSet();
- }
-
- boolean autosave = false;
- try {
- try {
- handler = sendQueryPreamble(handler, flags);
- autosave = sendAutomaticSavepoint(query, flags);
- sendQuery(query, (V3ParameterList) parameters, maxRows, fetchSize, flags,
- handler, null, adaptiveFetch);
- if ((flags & QueryExecutor.QUERY_EXECUTE_AS_SIMPLE) != 0) {
- // Sync message is not required for 'Q' execution as 'Q' ends with ReadyForQuery message
- // on its own
- } else {
- sendSync();
- }
- processResults(handler, flags, adaptiveFetch);
- estimatedReceiveBufferBytes = 0;
- } catch (PGBindException se) {
- // There are three causes of this error, an
- // invalid total Bind message length, a
- // BinaryStream that cannot provide the amount
- // of data claimed by the length argument, and
- // a BinaryStream that throws an Exception
- // when reading.
- //
- // We simply do not send the Execute message
- // so we can just continue on as if nothing
- // has happened. Perhaps we need to
- // introduce an error here to force the
- // caller to rollback if there is a
- // transaction in progress?
- //
- sendSync();
- processResults(handler, flags, adaptiveFetch);
- estimatedReceiveBufferBytes = 0;
- handler
- .handleError(new PSQLException(GT.tr("Unable to bind parameter values for statement."),
- PSQLState.INVALID_PARAMETER_VALUE, se.getIOException()));
}
- } catch (IOException e) {
- abort();
- handler.handleError(
- new PSQLException(GT.tr("An I/O error occurred while sending to the backend."),
- PSQLState.CONNECTION_FAILURE, e));
- }
-
- try {
- handler.handleCompletion();
- if (cleanupSavePoints) {
- releaseSavePoint(autosave, flags);
- }
- } catch (SQLException e) {
- rollbackIfRequired(autosave, e);
- }
+ waitOnLock();
+ lockedFor = obtainer;
}
- }
- private boolean sendAutomaticSavepoint(Query query, int flags) throws IOException {
- if (((flags & QueryExecutor.QUERY_SUPPRESS_BEGIN) == 0
- || getTransactionState() == TransactionState.OPEN)
- && query != restoreToAutoSave
- && !"COMMIT".equalsIgnoreCase(query.getNativeSql())
- && getAutoSave() != AutoSave.NEVER
- // If query has no resulting fields, it cannot fail with 'cached plan must not change result type'
- // thus no need to set a savepoint before such query
- && (getAutoSave() == AutoSave.ALWAYS
- // If CompositeQuery is observed, just assume it might fail and set the savepoint
- || !(query instanceof SimpleQuery)
- || ((SimpleQuery) query).getFields() != null)) {
+ /**
+ * Release lock on this connection presumably held by given object.
+ *
+ * @param holder object that holds the lock. Normally current thread.
+ * @throws PSQLException when this thread does not hold the lock
+ */
+ private void unlock(Object holder) throws PSQLException {
+ if (lockedFor != holder) {
+ throw new PSQLException(GT.tr("Tried to break lock on database connection"),
+ PSQLState.OBJECT_NOT_IN_STATE);
+ }
+ lockedFor = null;
+ lockCondition.signal();
+ }
+
+ /**
+ * Wait until our lock is released. Execution of a single synchronized method can then continue
+ * without further ado. Must be called at beginning of each synchronized public method.
+ */
+ private void waitOnLock() throws PSQLException {
+ while (lockedFor != null) {
+ try {
+ lockCondition.await();
+ } catch (InterruptedException ie) {
+ Thread.currentThread().interrupt();
+ throw new PSQLException(
+ GT.tr("Interrupted while waiting to obtain lock on database connection"),
+ PSQLState.OBJECT_NOT_IN_STATE, ie);
+ }
+ }
+ }
+
+ //
+ // Copy subprotocol implementation
+ //
+
+ /**
+ * @param holder object assumed to hold the lock
+ * @return whether given object actually holds the lock
+ */
+ boolean hasLockOn(Object holder) {
+ try (ResourceLock ignore = lock.obtain()) {
+ return lockedFor == holder;
+ }
+ }
+
+ /**
+ * @param holder object assumed to hold the lock
+ * @return whether given object actually holds the lock
+ */
+ private boolean hasLock(Object holder) {
+ return lockedFor == holder;
+ }
+
+ @Override
+ public Query createSimpleQuery(String sql) throws SQLException {
+ List queries = Parser.parseJdbcSql(sql,
+ getStandardConformingStrings(), false, true,
+ isReWriteBatchedInsertsEnabled(), getQuoteReturningIdentifiers());
+ return wrap(queries);
+ }
+
+ @Override
+ public Query wrap(List queries) {
+ if (queries.isEmpty()) {
+ // Empty query
+ return emptyQuery;
+ }
+ if (queries.size() == 1) {
+ NativeQuery firstQuery = queries.get(0);
+ if (isReWriteBatchedInsertsEnabled()
+ && firstQuery.getCommand().isBatchedReWriteCompatible()) {
+ int valuesBraceOpenPosition =
+ firstQuery.getCommand().getBatchRewriteValuesBraceOpenPosition();
+ int valuesBraceClosePosition =
+ firstQuery.getCommand().getBatchRewriteValuesBraceClosePosition();
+ return new BatchedQuery(firstQuery, this, valuesBraceOpenPosition,
+ valuesBraceClosePosition, isColumnSanitiserDisabled());
+ } else {
+ return new SimpleQuery(firstQuery, this, isColumnSanitiserDisabled());
+ }
+ }
+
+ // Multiple statements.
+ SimpleQuery[] subqueries = new SimpleQuery[queries.size()];
+ int[] offsets = new int[subqueries.length];
+ int offset = 0;
+ for (int i = 0; i < queries.size(); i++) {
+ NativeQuery nativeQuery = queries.get(i);
+ offsets[i] = offset;
+ subqueries[i] = new SimpleQuery(nativeQuery, this, isColumnSanitiserDisabled());
+ offset += nativeQuery.bindPositions.length;
+ }
+
+ return new CompositeQuery(subqueries, offsets);
+ }
+
+ private int updateQueryMode(int flags) {
+ switch (getPreferQueryMode()) {
+ case SIMPLE:
+ return flags | QUERY_EXECUTE_AS_SIMPLE;
+ case EXTENDED:
+ return flags & ~QUERY_EXECUTE_AS_SIMPLE;
+ default:
+ return flags;
+ }
+ }
+
+ @Override
+ public void execute(Query query, ParameterList parameters,
+ ResultHandler handler,
+ int maxRows, int fetchSize, int flags) throws SQLException {
+ execute(query, parameters, handler, maxRows, fetchSize, flags, false);
+ }
+
+ @Override
+ public void execute(Query query, ParameterList parameters,
+ ResultHandler handler,
+ int maxRows, int fetchSize, int flags, boolean adaptiveFetch) throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ waitOnLock();
+ if (LOGGER.isLoggable(Level.FINEST)) {
+ LOGGER.log(Level.FINEST, " simple execute, handler={0}, maxRows={1}, fetchSize={2}, flags={3}",
+ new Object[]{handler, maxRows, fetchSize, flags});
+ }
+
+ if (parameters == null) {
+ parameters = SimpleQuery.NO_PARAMETERS;
+ }
+
+ flags = updateQueryMode(flags);
+
+ boolean describeOnly = (QUERY_DESCRIBE_ONLY & flags) != 0;
+
+ ((V3ParameterList) parameters).convertFunctionOutParameters();
+
+ // Check parameters are all set..
+ if (!describeOnly) {
+ ((V3ParameterList) parameters).checkAllParametersSet();
+ }
+
+ boolean autosave = false;
+ try {
+ try {
+ handler = sendQueryPreamble(handler, flags);
+ autosave = sendAutomaticSavepoint(query, flags);
+ sendQuery(query, (V3ParameterList) parameters, maxRows, fetchSize, flags,
+ handler, null, adaptiveFetch);
+ if ((flags & QueryExecutor.QUERY_EXECUTE_AS_SIMPLE) != 0) {
+ // Sync message is not required for 'Q' execution as 'Q' ends with ReadyForQuery message
+ // on its own
+ } else {
+ sendSync();
+ }
+ processResults(handler, flags, adaptiveFetch);
+ estimatedReceiveBufferBytes = 0;
+ } catch (PGBindException se) {
+ // There are three causes of this error, an
+ // invalid total Bind message length, a
+ // BinaryStream that cannot provide the amount
+ // of data claimed by the length argument, and
+ // a BinaryStream that throws an Exception
+ // when reading.
+ //
+ // We simply do not send the Execute message
+ // so we can just continue on as if nothing
+ // has happened. Perhaps we need to
+ // introduce an error here to force the
+ // caller to rollback if there is a
+ // transaction in progress?
+ //
+ sendSync();
+ processResults(handler, flags, adaptiveFetch);
+ estimatedReceiveBufferBytes = 0;
+ handler
+ .handleError(new PSQLException(GT.tr("Unable to bind parameter values for statement."),
+ PSQLState.INVALID_PARAMETER_VALUE, se.getIOException()));
+ }
+ } catch (IOException e) {
+ abort();
+ handler.handleError(
+ new PSQLException(GT.tr("An I/O error occurred while sending to the backend."),
+ PSQLState.CONNECTION_FAILURE, e));
+ }
+
+ try {
+ handler.handleCompletion();
+ if (cleanupSavePoints) {
+ releaseSavePoint(autosave, flags);
+ }
+ } catch (SQLException e) {
+ rollbackIfRequired(autosave, e);
+ }
+ }
+ }
+
+ private boolean sendAutomaticSavepoint(Query query, int flags) throws IOException {
+ if (((flags & QueryExecutor.QUERY_SUPPRESS_BEGIN) == 0
+ || getTransactionState() == TransactionState.OPEN)
+ && query != restoreToAutoSave
+ && !"COMMIT".equalsIgnoreCase(query.getNativeSql())
+ && getAutoSave() != AutoSave.NEVER
+ // If query has no resulting fields, it cannot fail with 'cached plan must not change result type'
+ // thus no need to set a savepoint before such query
+ && (getAutoSave() == AutoSave.ALWAYS
+ // If CompositeQuery is observed, just assume it might fail and set the savepoint
+ || !(query instanceof SimpleQuery)
+ || ((SimpleQuery) query).getFields() != null)) {
/*
create a different SAVEPOINT the first time so that all subsequent SAVEPOINTS can be released
easily. There have been reports of server resources running out if there are too many
SAVEPOINTS.
*/
- sendOneQuery(autoSaveQuery, SimpleQuery.NO_PARAMETERS, 1, 0,
- QUERY_NO_RESULTS | QUERY_NO_METADATA
- // PostgreSQL does not support bind, exec, simple, sync message flow,
- // so we force autosavepoint to use simple if the main query is using simple
- | QUERY_EXECUTE_AS_SIMPLE);
- return true;
- }
- return false;
- }
-
- private void releaseSavePoint(boolean autosave, int flags) throws SQLException {
- if ( autosave
- && getAutoSave() == AutoSave.ALWAYS
- && getTransactionState() == TransactionState.OPEN) {
- try {
- sendOneQuery(releaseAutoSave, SimpleQuery.NO_PARAMETERS, 1, 0,
- QUERY_NO_RESULTS | QUERY_NO_METADATA
- | QUERY_EXECUTE_AS_SIMPLE);
-
- } catch (IOException ex) {
- throw new PSQLException(GT.tr("Error releasing savepoint"), PSQLState.IO_ERROR);
- }
- }
- }
-
- private void rollbackIfRequired(boolean autosave, SQLException e) throws SQLException {
- if (autosave
- && getTransactionState() == TransactionState.FAILED
- && (getAutoSave() == AutoSave.ALWAYS || willHealOnRetry(e))) {
- try {
- // ROLLBACK and AUTOSAVE are executed as simple always to overcome "statement no longer exists S_xx"
- execute(restoreToAutoSave, SimpleQuery.NO_PARAMETERS, new ResultHandlerDelegate(null),
- 1, 0, QUERY_NO_RESULTS | QUERY_NO_METADATA | QUERY_EXECUTE_AS_SIMPLE);
- } catch (SQLException e2) {
- // That's O(N), sorry
- e.setNextException(e2);
- }
- }
- throw e;
- }
-
- // Deadlock avoidance:
- //
- // It's possible for the send and receive streams to get "deadlocked" against each other since
- // we do not have a separate thread. The scenario is this: we have two streams:
- //
- // driver -> TCP buffering -> server
- // server -> TCP buffering -> driver
- //
- // The server behaviour is roughly:
- // while true:
- // read message
- // execute message
- // write results
- //
- // If the server -> driver stream has a full buffer, the write will block.
- // If the driver is still writing when this happens, and the driver -> server
- // stream also fills up, we deadlock: the driver is blocked on write() waiting
- // for the server to read some more data, and the server is blocked on write()
- // waiting for the driver to read some more data.
- //
- // To avoid this, we guess at how much response data we can request from the
- // server before the server -> driver stream's buffer is full (MAX_BUFFERED_RECV_BYTES).
- // This is the point where the server blocks on write and stops reading data. If we
- // reach this point, we force a Sync message and read pending data from the server
- // until ReadyForQuery, then go back to writing more queries unless we saw an error.
- //
- // This is not 100% reliable -- it's only done in the batch-query case and only
- // at a reasonably high level (per query, not per message), and it's only an estimate
- // -- so it might break. To do it correctly in all cases would seem to require a
- // separate send or receive thread as we can only do the Sync-and-read-results
- // operation at particular points, and also as we don't really know how much data
- // the server is sending.
- //
- // Our message size estimation is coarse, and disregards asynchronous
- // notifications, warnings/info/debug messages, etc, so the response size may be
- // quite different from the 250 bytes assumed here even for queries that don't
- // return data.
- //
- // See github issue #194 and #195 .
- //
- // Assume 64k server->client buffering, which is extremely conservative. A typical
- // system will have 200kb or more of buffers for its receive buffers, and the sending
- // system will typically have the same on the send side, giving us 400kb or to work
- // with. (We could check Java's receive buffer size, but prefer to assume a very
- // conservative buffer instead, and we don't know how big the server's send
- // buffer is.)
- //
- private static final int MAX_BUFFERED_RECV_BYTES = 64000;
- private static final int NODATA_QUERY_RESPONSE_SIZE_BYTES = 250;
-
- @Override
- public void execute(Query[] queries, ParameterList[] parameterLists,
- BatchResultHandler batchHandler, int maxRows, int fetchSize, int flags) throws SQLException {
- execute(queries, parameterLists, batchHandler, maxRows, fetchSize, flags, false);
- }
-
- @Override
- public void execute(Query[] queries, ParameterList[] parameterLists,
- BatchResultHandler batchHandler, int maxRows, int fetchSize, int flags, boolean adaptiveFetch)
- throws SQLException {
- try (ResourceLock ignore = lock.obtain()) {
- waitOnLock();
- if (LOGGER.isLoggable(Level.FINEST)) {
- LOGGER.log(Level.FINEST, " batch execute {0} queries, handler={1}, maxRows={2}, fetchSize={3}, flags={4}",
- new Object[]{queries.length, batchHandler, maxRows, fetchSize, flags});
- }
-
- flags = updateQueryMode(flags);
-
- boolean describeOnly = (QUERY_DESCRIBE_ONLY & flags) != 0;
- // Check parameters and resolve OIDs.
- if (!describeOnly) {
- for (ParameterList parameterList : parameterLists) {
- if (parameterList != null) {
- ((V3ParameterList) parameterList).checkAllParametersSet();
- }
+ sendOneQuery(autoSaveQuery, SimpleQuery.NO_PARAMETERS, 1, 0,
+ QUERY_NO_RESULTS | QUERY_NO_METADATA
+ // PostgreSQL does not support bind, exec, simple, sync message flow,
+ // so we force autosavepoint to use simple if the main query is using simple
+ | QUERY_EXECUTE_AS_SIMPLE);
+ return true;
}
- }
-
- boolean autosave = false;
- ResultHandler handler = batchHandler;
- try {
- handler = sendQueryPreamble(batchHandler, flags);
- autosave = sendAutomaticSavepoint(queries[0], flags);
- estimatedReceiveBufferBytes = 0;
-
- for (int i = 0; i < queries.length; i++) {
- Query query = queries[i];
- V3ParameterList parameters = (V3ParameterList) parameterLists[i];
- if (parameters == null) {
- parameters = SimpleQuery.NO_PARAMETERS;
- }
-
- sendQuery(query, parameters, maxRows, fetchSize, flags, handler, batchHandler, adaptiveFetch);
-
- if (handler.getException() != null) {
- break;
- }
- }
-
- if (handler.getException() == null) {
- if ((flags & QueryExecutor.QUERY_EXECUTE_AS_SIMPLE) != 0) {
- // Sync message is not required for 'Q' execution as 'Q' ends with ReadyForQuery message
- // on its own
- } else {
- sendSync();
- }
- processResults(handler, flags, adaptiveFetch);
- estimatedReceiveBufferBytes = 0;
- }
- } catch (IOException e) {
- abort();
- handler.handleError(
- new PSQLException(GT.tr("An I/O error occurred while sending to the backend."),
- PSQLState.CONNECTION_FAILURE, e));
- }
-
- try {
- handler.handleCompletion();
- if (cleanupSavePoints) {
- releaseSavePoint(autosave, flags);
- }
- } catch (SQLException e) {
- rollbackIfRequired(autosave, e);
- }
- }
- }
-
- private ResultHandler sendQueryPreamble(final ResultHandler delegateHandler, int flags)
- throws IOException {
- // First, send CloseStatements for finalized SimpleQueries that had statement names assigned.
- processDeadParsedQueries();
- processDeadPortals();
-
- // Send BEGIN on first statement in transaction.
- if ((flags & QueryExecutor.QUERY_SUPPRESS_BEGIN) != 0
- || getTransactionState() != TransactionState.IDLE) {
- return delegateHandler;
+ return false;
}
- int beginFlags = QueryExecutor.QUERY_NO_METADATA;
- if ((flags & QueryExecutor.QUERY_ONESHOT) != 0) {
- beginFlags |= QueryExecutor.QUERY_ONESHOT;
- }
+ private void releaseSavePoint(boolean autosave, int flags) throws SQLException {
+ if (autosave
+ && getAutoSave() == AutoSave.ALWAYS
+ && getTransactionState() == TransactionState.OPEN) {
+ try {
+ sendOneQuery(releaseAutoSave, SimpleQuery.NO_PARAMETERS, 1, 0,
+ QUERY_NO_RESULTS | QUERY_NO_METADATA
+ | QUERY_EXECUTE_AS_SIMPLE);
- beginFlags |= QueryExecutor.QUERY_EXECUTE_AS_SIMPLE;
-
- beginFlags = updateQueryMode(beginFlags);
-
- final SimpleQuery beginQuery = (flags & QueryExecutor.QUERY_READ_ONLY_HINT) == 0 ? beginTransactionQuery : beginReadOnlyTransactionQuery;
-
- sendOneQuery(beginQuery, SimpleQuery.NO_PARAMETERS, 0, 0, beginFlags);
-
- // Insert a handler that intercepts the BEGIN.
- return new ResultHandlerDelegate(delegateHandler) {
- private boolean sawBegin = false;
-
- @Override
- public void handleResultRows(Query fromQuery, Field[] fields, List tuples,
- ResultCursor cursor) {
- if (sawBegin) {
- super.handleResultRows(fromQuery, fields, tuples, cursor);
- }
- }
-
- @Override
- public void handleCommandStatus(String status, long updateCount, long insertOID) {
- if (!sawBegin) {
- sawBegin = true;
- if (!"BEGIN".equals(status)) {
- handleError(new PSQLException(GT.tr("Expected command status BEGIN, got {0}.", status),
- PSQLState.PROTOCOL_VIOLATION));
- }
- } else {
- super.handleCommandStatus(status, updateCount, insertOID);
- }
- }
- };
- }
-
- //
- // Fastpath
- //
-
- @Override
- @SuppressWarnings("deprecation")
- public byte [] fastpathCall(int fnid, ParameterList parameters,
- boolean suppressBegin)
- throws SQLException {
- try (ResourceLock ignore = lock.obtain()) {
- waitOnLock();
- if (!suppressBegin) {
- doSubprotocolBegin();
- }
- try {
- sendFastpathCall(fnid, (SimpleParameterList) parameters);
- return receiveFastpathResult();
- } catch (IOException ioe) {
- abort();
- throw new PSQLException(GT.tr("An I/O error occurred while sending to the backend."),
- PSQLState.CONNECTION_FAILURE, ioe);
- }
- }
- }
-
- public void doSubprotocolBegin() throws SQLException {
- if (getTransactionState() == TransactionState.IDLE) {
-
- LOGGER.log(Level.FINEST, "Issuing BEGIN before fastpath or copy call.");
-
- ResultHandler handler = new ResultHandlerBase() {
- private boolean sawBegin = false;
-
- @Override
- public void handleCommandStatus(String status, long updateCount, long insertOID) {
- if (!sawBegin) {
- if (!"BEGIN".equals(status)) {
- handleError(
- new PSQLException(GT.tr("Expected command status BEGIN, got {0}.", status),
- PSQLState.PROTOCOL_VIOLATION));
+ } catch (IOException ex) {
+ throw new PSQLException(GT.tr("Error releasing savepoint"), PSQLState.IO_ERROR);
}
- sawBegin = true;
- } else {
- handleError(new PSQLException(GT.tr("Unexpected command status: {0}.", status),
- PSQLState.PROTOCOL_VIOLATION));
- }
}
-
- @Override
- public void handleWarning(SQLWarning warning) {
- // we don't want to ignore warnings and it would be tricky
- // to chain them back to the connection, so since we don't
- // expect to get them in the first place, we just consider
- // them errors.
- handleError(warning);
- }
- };
-
- try {
- /* Send BEGIN with simple protocol preferred */
- int beginFlags = QueryExecutor.QUERY_NO_METADATA
- | QueryExecutor.QUERY_ONESHOT
- | QueryExecutor.QUERY_EXECUTE_AS_SIMPLE;
- beginFlags = updateQueryMode(beginFlags);
- sendOneQuery(beginTransactionQuery, SimpleQuery.NO_PARAMETERS, 0, 0, beginFlags);
- sendSync();
- processResults(handler, 0);
- estimatedReceiveBufferBytes = 0;
- } catch (IOException ioe) {
- throw new PSQLException(GT.tr("An I/O error occurred while sending to the backend."),
- PSQLState.CONNECTION_FAILURE, ioe);
- }
}
- }
-
- @Override
- @SuppressWarnings("deprecation")
- public ParameterList createFastpathParameters(int count) {
- return new SimpleParameterList(count, this);
- }
-
- private void sendFastpathCall(int fnid, SimpleParameterList params)
- throws SQLException, IOException {
- if (LOGGER.isLoggable(Level.FINEST)) {
- LOGGER.log(Level.FINEST, " FE=> FunctionCall({0}, {1} params)", new Object[]{fnid, params.getParameterCount()});
- }
-
- //
- // Total size = 4 (length)
- // + 4 (function OID)
- // + 2 (format code count) + N * 2 (format codes)
- // + 2 (parameter count) + encodedSize (parameters)
- // + 2 (result format)
-
- int paramCount = params.getParameterCount();
- int encodedSize = 0;
- for (int i = 1; i <= paramCount; i++) {
- if (params.isNull(i)) {
- encodedSize += 4;
- } else {
- encodedSize += 4 + params.getV3Length(i);
- }
- }
-
- pgStream.sendChar('F');
- pgStream.sendInteger4(4 + 4 + 2 + 2 * paramCount + 2 + encodedSize + 2);
- pgStream.sendInteger4(fnid);
- pgStream.sendInteger2(paramCount);
- for (int i = 1; i <= paramCount; i++) {
- pgStream.sendInteger2(params.isBinary(i) ? 1 : 0);
- }
- pgStream.sendInteger2(paramCount);
- for (int i = 1; i <= paramCount; i++) {
- if (params.isNull(i)) {
- pgStream.sendInteger4(-1);
- } else {
- pgStream.sendInteger4(params.getV3Length(i)); // Parameter size
- params.writeV3Value(i, pgStream);
- }
- }
- pgStream.sendInteger2(1); // Binary result format
- pgStream.flush();
- }
-
- // Just for API compatibility with previous versions.
- @Override
- public void processNotifies() throws SQLException {
- processNotifies(-1);
- }
-
- /**
- * @param timeoutMillis when > 0, block for this time
- * when =0, block forever
- * when < 0, don't block
- */
- @Override
- public void processNotifies(int timeoutMillis) throws SQLException {
- try (ResourceLock ignore = lock.obtain()) {
- waitOnLock();
- // Asynchronous notifies only arrive when we are not in a transaction
- if (getTransactionState() != TransactionState.IDLE) {
- return;
- }
-
- if (hasNotifications()) {
- // No need to timeout when there are already notifications. We just check for more in this case.
- timeoutMillis = -1;
- }
-
- boolean useTimeout = timeoutMillis > 0;
- long startTime = 0L;
- int oldTimeout = 0;
- if (useTimeout) {
- startTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
- try {
- oldTimeout = pgStream.getSocket().getSoTimeout();
- } catch (SocketException e) {
- throw new PSQLException(GT.tr("An error occurred while trying to get the socket "
- + "timeout."), PSQLState.CONNECTION_FAILURE, e);
- }
- }
-
- try {
- while (timeoutMillis >= 0 || pgStream.hasMessagePending()) {
- if (useTimeout && timeoutMillis >= 0) {
- setSocketTimeout(timeoutMillis);
- }
- int c = pgStream.receiveChar();
- if (useTimeout && timeoutMillis >= 0) {
- setSocketTimeout(0); // Don't timeout after first char
- }
- switch (c) {
- case 'A': // Asynchronous Notify
- receiveAsyncNotify();
- timeoutMillis = -1;
- continue;
- case 'E':
- // Error Response (response to pretty much everything; backend then skips until Sync)
- throw receiveErrorResponse();
- case 'N': // Notice Response (warnings / info)
- SQLWarning warning = receiveNoticeResponse();
- addWarning(warning);
- if (useTimeout) {
- long newTimeMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
- timeoutMillis = timeoutMillis + (int)(startTime - newTimeMillis); // Overflows after 49 days, ignore that
- startTime = newTimeMillis;
- if (timeoutMillis == 0) {
- timeoutMillis = -1; // Don't accidentally wait forever
- }
- }
- break;
- default:
- throw new PSQLException(GT.tr("Unknown Response Type {0}.", (char) c),
- PSQLState.CONNECTION_FAILURE);
- }
- }
- } catch (SocketTimeoutException ioe) {
- // No notifications this time...
- } catch (IOException ioe) {
- throw new PSQLException(GT.tr("An I/O error occurred while sending to the backend."),
- PSQLState.CONNECTION_FAILURE, ioe);
- } finally {
- if (useTimeout) {
- setSocketTimeout(oldTimeout);
- }
- }
- }
- }
-
- private void setSocketTimeout(int millis) throws PSQLException {
- try {
- Socket s = pgStream.getSocket();
- if (!s.isClosed()) { // Is this check required?
- pgStream.setNetworkTimeout(millis);
- }
- } catch (IOException e) {
- throw new PSQLException(GT.tr("An error occurred while trying to reset the socket timeout."),
- PSQLState.CONNECTION_FAILURE, e);
- }
- }
-
- private byte [] receiveFastpathResult() throws IOException, SQLException {
- boolean endQuery = false;
- SQLException error = null;
- byte[] returnValue = null;
-
- while (!endQuery) {
- int c = pgStream.receiveChar();
- switch (c) {
- case 'A': // Asynchronous Notify
- receiveAsyncNotify();
- break;
-
- case 'E':
- // Error Response (response to pretty much everything; backend then skips until Sync)
- SQLException newError = receiveErrorResponse();
- if (error == null) {
- error = newError;
- } else {
- error.setNextException(newError);
- }
- // keep processing
- break;
-
- case 'N': // Notice Response (warnings / info)
- SQLWarning warning = receiveNoticeResponse();
- addWarning(warning);
- break;
-
- case 'Z': // Ready For Query (eventual response to Sync)
- receiveRFQ();
- endQuery = true;
- break;
-
- case 'V': // FunctionCallResponse
- int msgLen = pgStream.receiveInteger4();
- int valueLen = pgStream.receiveInteger4();
-
- LOGGER.log(Level.FINEST, " <=BE FunctionCallResponse({0} bytes)", valueLen);
-
- if (valueLen != -1) {
- byte[] buf = new byte[valueLen];
- pgStream.receive(buf, 0, valueLen);
- returnValue = buf;
- }
-
- break;
-
- case 'S': // Parameter Status
- try {
- receiveParameterStatus();
- } catch (SQLException e) {
- if (error == null) {
- error = e;
- } else {
- error.setNextException(e);
+ private void rollbackIfRequired(boolean autosave, SQLException e) throws SQLException {
+ if (autosave
+ && getTransactionState() == TransactionState.FAILED
+ && (getAutoSave() == AutoSave.ALWAYS || willHealOnRetry(e))) {
+ try {
+ // ROLLBACK and AUTOSAVE are executed as simple always to overcome "statement no longer exists S_xx"
+ execute(restoreToAutoSave, SimpleQuery.NO_PARAMETERS, new ResultHandlerDelegate(null),
+ 1, 0, QUERY_NO_RESULTS | QUERY_NO_METADATA | QUERY_EXECUTE_AS_SIMPLE);
+ } catch (SQLException e2) {
+ // That's O(N), sorry
+ e.setNextException(e2);
}
- endQuery = true;
- }
- break;
-
- default:
- throw new PSQLException(GT.tr("Unknown Response Type {0}.", (char) c),
- PSQLState.CONNECTION_FAILURE);
- }
-
+ }
+ throw e;
}
- // did we get an error during this query?
- if (error != null) {
- throw error;
+ @Override
+ public void execute(Query[] queries, ParameterList[] parameterLists,
+ BatchResultHandler batchHandler, int maxRows, int fetchSize, int flags) throws SQLException {
+ execute(queries, parameterLists, batchHandler, maxRows, fetchSize, flags, false);
}
- return returnValue;
- }
-
- //
- // Copy subprotocol implementation
- //
-
- /**
- * Sends given query to BE to start, initialize and lock connection for a CopyOperation.
- *
- * @param sql COPY FROM STDIN / COPY TO STDOUT statement
- * @return CopyIn or CopyOut operation object
- * @throws SQLException on failure
- */
- @Override
- public CopyOperation startCopy(String sql, boolean suppressBegin)
- throws SQLException {
- try (ResourceLock ignore = lock.obtain()) {
- waitOnLock();
- if (!suppressBegin) {
- doSubprotocolBegin();
- }
- byte[] buf = sql.getBytes(StandardCharsets.UTF_8);
-
- try {
- LOGGER.log(Level.FINEST, " FE=> Query(CopyStart)");
-
- pgStream.sendChar('Q');
- pgStream.sendInteger4(buf.length + 4 + 1);
- pgStream.send(buf);
- pgStream.sendChar(0);
- pgStream.flush();
-
- return processCopyResults(null, true);
- // expect a CopyInResponse or CopyOutResponse to our query above
- } catch (IOException ioe) {
- throw new PSQLException(GT.tr("Database connection failed when starting copy"),
- PSQLState.CONNECTION_FAILURE, ioe);
- }
- }
- }
-
- /**
- * Locks connection and calls initializer for a new CopyOperation Called via startCopy ->
- * processCopyResults.
- *
- * @param op an uninitialized CopyOperation
- * @throws SQLException on locking failure
- * @throws IOException on database connection failure
- */
- private void initCopy(CopyOperationImpl op) throws SQLException, IOException {
- try (ResourceLock ignore = lock.obtain()) {
- pgStream.receiveInteger4(); // length not used
- int rowFormat = pgStream.receiveChar();
- int numFields = pgStream.receiveInteger2();
- int[] fieldFormats = new int[numFields];
-
- for (int i = 0; i < numFields; i++) {
- fieldFormats[i] = pgStream.receiveInteger2();
- }
-
- lock(op);
- op.init(this, rowFormat, fieldFormats);
- }
- }
-
- /**
- * Finishes a copy operation and unlocks connection discarding any exchanged data.
- *
- * @param op the copy operation presumably currently holding lock on this connection
- * @throws SQLException on any additional failure
- */
- public void cancelCopy(CopyOperationImpl op) throws SQLException {
- if (!hasLock(op)) {
- throw new PSQLException(GT.tr("Tried to cancel an inactive copy operation"),
- PSQLState.OBJECT_NOT_IN_STATE);
- }
-
- SQLException error = null;
- int errors = 0;
-
- try {
- if (op instanceof CopyIn) {
+ @Override
+ public void execute(Query[] queries, ParameterList[] parameterLists,
+ BatchResultHandler batchHandler, int maxRows, int fetchSize, int flags, boolean adaptiveFetch)
+ throws SQLException {
try (ResourceLock ignore = lock.obtain()) {
- LOGGER.log(Level.FINEST, "FE => CopyFail");
- final byte[] msg = "Copy cancel requested".getBytes(StandardCharsets.US_ASCII);
- pgStream.sendChar('f'); // CopyFail
- pgStream.sendInteger4(5 + msg.length);
- pgStream.send(msg);
- pgStream.sendChar(0);
- pgStream.flush();
- do {
- try {
- processCopyResults(op, true); // discard rest of input
- } catch (SQLException se) { // expected error response to failing copy
- errors++;
- if (error != null) {
- SQLException e = se;
- SQLException next;
- while ((next = e.getNextException()) != null) {
- e = next;
- }
- e.setNextException(error);
- }
- error = se;
- }
- } while (hasLock(op));
- }
- } else if (op instanceof CopyOut) {
- sendQueryCancel();
- }
-
- } catch (IOException ioe) {
- throw new PSQLException(GT.tr("Database connection failed when canceling copy operation"),
- PSQLState.CONNECTION_FAILURE, ioe);
- } finally {
- // Need to ensure the lock isn't held anymore, or else
- // future operations, rather than failing due to the
- // broken connection, will simply hang waiting for this
- // lock.
- try (ResourceLock ignore = lock.obtain()) {
- if (hasLock(op)) {
- unlock(op);
- }
- }
- }
-
- if (op instanceof CopyIn) {
- if (errors < 1) {
- throw new PSQLException(GT.tr("Missing expected error response to copy cancel request"),
- PSQLState.COMMUNICATION_ERROR);
- } else if (errors > 1) {
- throw new PSQLException(
- GT.tr("Got {0} error responses to single copy cancel request", String.valueOf(errors)),
- PSQLState.COMMUNICATION_ERROR, error);
- }
- }
- }
-
- /**
- * Finishes writing to copy and unlocks connection.
- *
- * @param op the copy operation presumably currently holding lock on this connection
- * @return number of rows updated for server versions 8.2 or newer
- * @throws SQLException on failure
- */
- public long endCopy(CopyOperationImpl op) throws SQLException {
- try (ResourceLock ignore = lock.obtain()) {
- if (!hasLock(op)) {
- throw new PSQLException(GT.tr("Tried to end inactive copy"), PSQLState.OBJECT_NOT_IN_STATE);
- }
-
- try {
- LOGGER.log(Level.FINEST, " FE=> CopyDone");
-
- pgStream.sendChar('c'); // CopyDone
- pgStream.sendInteger4(4);
- pgStream.flush();
-
- do {
- processCopyResults(op, true);
- } while (hasLock(op));
- return op.getHandledRowCount();
- } catch (IOException ioe) {
- throw new PSQLException(GT.tr("Database connection failed when ending copy"),
- PSQLState.CONNECTION_FAILURE, ioe);
- }
- }
- }
-
- /**
- * Sends data during a live COPY IN operation. Only unlocks the connection if server suddenly
- * returns CommandComplete, which should not happen
- *
- * @param op the CopyIn operation presumably currently holding lock on this connection
- * @param data bytes to send
- * @param off index of first byte to send (usually 0)
- * @param siz number of bytes to send (usually data.length)
- * @throws SQLException on failure
- */
- public void writeToCopy(CopyOperationImpl op, byte[] data, int off, int siz)
- throws SQLException {
- try (ResourceLock ignore = lock.obtain()) {
- if (!hasLock(op)) {
- throw new PSQLException(GT.tr("Tried to write to an inactive copy operation"),
- PSQLState.OBJECT_NOT_IN_STATE);
- }
-
- LOGGER.log(Level.FINEST, " FE=> CopyData({0})", siz);
-
- try {
- pgStream.sendChar('d');
- pgStream.sendInteger4(siz + 4);
- pgStream.send(data, off, siz);
- } catch (IOException ioe) {
- throw new PSQLException(GT.tr("Database connection failed when writing to copy"),
- PSQLState.CONNECTION_FAILURE, ioe);
- }
- }
- }
-
- /**
- * Sends data during a live COPY IN operation. Only unlocks the connection if server suddenly
- * returns CommandComplete, which should not happen
- *
- * @param op the CopyIn operation presumably currently holding lock on this connection
- * @param from the source of bytes, e.g. a ByteBufferByteStreamWriter
- * @throws SQLException on failure
- */
- public void writeToCopy(CopyOperationImpl op, ByteStreamWriter from)
- throws SQLException {
- try (ResourceLock ignore = lock.obtain()) {
- if (!hasLock(op)) {
- throw new PSQLException(GT.tr("Tried to write to an inactive copy operation"),
- PSQLState.OBJECT_NOT_IN_STATE);
- }
-
- int siz = from.getLength();
- LOGGER.log(Level.FINEST, " FE=> CopyData({0})", siz);
-
- try {
- pgStream.sendChar('d');
- pgStream.sendInteger4(siz + 4);
- pgStream.send(from);
- } catch (IOException ioe) {
- throw new PSQLException(GT.tr("Database connection failed when writing to copy"),
- PSQLState.CONNECTION_FAILURE, ioe);
- }
- }
- }
-
- public void flushCopy(CopyOperationImpl op) throws SQLException {
- try (ResourceLock ignore = lock.obtain()) {
- if (!hasLock(op)) {
- throw new PSQLException(GT.tr("Tried to write to an inactive copy operation"),
- PSQLState.OBJECT_NOT_IN_STATE);
- }
-
- try {
- pgStream.flush();
- } catch (IOException ioe) {
- throw new PSQLException(GT.tr("Database connection failed when writing to copy"),
- PSQLState.CONNECTION_FAILURE, ioe);
- }
- }
- }
-
- /**
- * Wait for a row of data to be received from server on an active copy operation
- * Connection gets unlocked by processCopyResults() at end of operation.
- *
- * @param op the copy operation presumably currently holding lock on this connection
- * @param block whether to block waiting for input
- * @throws SQLException on any failure
- */
- void readFromCopy(CopyOperationImpl op, boolean block) throws SQLException {
- try (ResourceLock ignore = lock.obtain()) {
- if (!hasLock(op)) {
- throw new PSQLException(GT.tr("Tried to read from inactive copy"),
- PSQLState.OBJECT_NOT_IN_STATE);
- }
-
- try {
- processCopyResults(op, block); // expect a call to handleCopydata() to store the data
- } catch (IOException ioe) {
- throw new PSQLException(GT.tr("Database connection failed when reading from copy"),
- PSQLState.CONNECTION_FAILURE, ioe);
- }
- }
- }
-
- AtomicBoolean processingCopyResults = new AtomicBoolean(false);
-
- /**
- * Handles copy sub protocol responses from server. Unlocks at end of sub protocol, so operations
- * on pgStream or QueryExecutor are not allowed in a method after calling this!
- *
- * @param block whether to block waiting for input
- * @return CopyIn when COPY FROM STDIN starts; CopyOut when COPY TO STDOUT starts; null when copy
- * ends; otherwise, the operation given as parameter.
- * @throws SQLException in case of misuse
- * @throws IOException from the underlying connection
- */
- CopyOperationImpl processCopyResults(CopyOperationImpl op, boolean block)
- throws SQLException, IOException {
-
- /*
- * fixes issue #1592 where one thread closes the stream and another is reading it
- */
- if (pgStream.isClosed()) {
- throw new PSQLException(GT.tr("PGStream is closed"),
- PSQLState.CONNECTION_DOES_NOT_EXIST);
- }
- /*
- * This is a hack as we should not end up here, but sometimes do with large copy operations.
- */
- if (!processingCopyResults.compareAndSet(false, true)) {
- LOGGER.log(Level.INFO, "Ignoring request to process copy results, already processing");
- return null;
- }
-
- // put this all in a try, finally block and reset the processingCopyResults in the finally clause
- try {
- boolean endReceiving = false;
- SQLException error = null;
- SQLException errors = null;
- int len;
-
- while (!endReceiving && (block || pgStream.hasMessagePending())) {
-
- // There is a bug in the server's implementation of the copy
- // protocol. It returns command complete immediately upon
- // receiving the EOF marker in the binary protocol,
- // potentially before we've issued CopyDone. When we are not
- // blocking, we don't think we are done, so we hold off on
- // processing command complete and any subsequent messages
- // until we actually are done with the copy.
- //
- if (!block) {
- int c = pgStream.peekChar();
- if (c == 'C') {
- // CommandComplete
- LOGGER.log(Level.FINEST, " <=BE CommandStatus, Ignored until CopyDone");
- break;
- }
- }
-
- int c = pgStream.receiveChar();
- switch (c) {
-
- case 'A': // Asynchronous Notify
-
- LOGGER.log(Level.FINEST, " <=BE Asynchronous Notification while copying");
-
- receiveAsyncNotify();
- break;
-
- case 'N': // Notice Response
-
- LOGGER.log(Level.FINEST, " <=BE Notification while copying");
-
- addWarning(receiveNoticeResponse());
- break;
-
- case 'C': // Command Complete
-
- String status = receiveCommandStatus();
-
- try {
- if (op == null) {
- throw new PSQLException(GT
- .tr("Received CommandComplete ''{0}'' without an active copy operation", status),
- PSQLState.OBJECT_NOT_IN_STATE);
- }
- op.handleCommandStatus(status);
- } catch (SQLException se) {
- error = se;
- }
-
- block = true;
- break;
-
- case 'E': // ErrorMessage (expected response to CopyFail)
-
- error = receiveErrorResponse();
- // We've received the error and we now expect to receive
- // Ready for query, but we must block because it might still be
- // on the wire and not here yet.
- block = true;
- break;
-
- case 'G': // CopyInResponse
-
- LOGGER.log(Level.FINEST, " <=BE CopyInResponse");
-
- if (op != null) {
- error = new PSQLException(GT.tr("Got CopyInResponse from server during an active {0}",
- op.getClass().getName()), PSQLState.OBJECT_NOT_IN_STATE);
- }
-
- op = new CopyInImpl();
- initCopy(op);
- endReceiving = true;
- break;
-
- case 'H': // CopyOutResponse
-
- LOGGER.log(Level.FINEST, " <=BE CopyOutResponse");
-
- if (op != null) {
- error = new PSQLException(GT.tr("Got CopyOutResponse from server during an active {0}",
- op.getClass().getName()), PSQLState.OBJECT_NOT_IN_STATE);
- }
-
- op = new CopyOutImpl();
- initCopy(op);
- endReceiving = true;
- break;
-
- case 'W': // CopyBothResponse
-
- LOGGER.log(Level.FINEST, " <=BE CopyBothResponse");
-
- if (op != null) {
- error = new PSQLException(GT.tr("Got CopyBothResponse from server during an active {0}",
- op.getClass().getName()), PSQLState.OBJECT_NOT_IN_STATE);
- }
-
- op = new CopyDualImpl();
- initCopy(op);
- endReceiving = true;
- break;
-
- case 'd': // CopyData
-
- LOGGER.log(Level.FINEST, " <=BE CopyData");
-
- len = pgStream.receiveInteger4() - 4;
-
- assert len > 0 : "Copy Data length must be greater than 4";
-
- byte[] buf = pgStream.receive(len);
- if (op == null) {
- error = new PSQLException(GT.tr("Got CopyData without an active copy operation"),
- PSQLState.OBJECT_NOT_IN_STATE);
- } else if (!(op instanceof CopyOut)) {
- error = new PSQLException(
- GT.tr("Unexpected copydata from server for {0}", op.getClass().getName()),
- PSQLState.COMMUNICATION_ERROR);
- } else {
- op.handleCopydata(buf);
- }
- endReceiving = true;
- break;
-
- case 'c': // CopyDone (expected after all copydata received)
-
- LOGGER.log(Level.FINEST, " <=BE CopyDone");
-
- len = pgStream.receiveInteger4() - 4;
- if (len > 0) {
- pgStream.receive(len); // not in specification; should never appear
- }
-
- if (!(op instanceof CopyOut)) {
- error = new PSQLException("Got CopyDone while not copying from server",
- PSQLState.OBJECT_NOT_IN_STATE);
- }
-
- // keep receiving since we expect a CommandComplete
- block = true;
- break;
- case 'S': // Parameter Status
- try {
- receiveParameterStatus();
- } catch (SQLException e) {
- error = e;
- endReceiving = true;
- }
- break;
-
- case 'Z': // ReadyForQuery: After FE:CopyDone => BE:CommandComplete
-
- receiveRFQ();
- if (op != null && hasLock(op)) {
- unlock(op);
- }
- op = null;
- endReceiving = true;
- break;
-
- // If the user sends a non-copy query, we've got to handle some additional things.
- //
- case 'T': // Row Description (response to Describe)
- LOGGER.log(Level.FINEST, " <=BE RowDescription (during copy ignored)");
-
- skipMessage();
- break;
-
- case 'D': // DataRow
- LOGGER.log(Level.FINEST, " <=BE DataRow (during copy ignored)");
-
- skipMessage();
- break;
-
- default:
- throw new IOException(
- GT.tr("Unexpected packet type during copy: {0}", Integer.toString(c)));
- }
-
- // Collect errors into a neat chain for completeness
- if (error != null) {
- if (errors != null) {
- error.setNextException(errors);
- }
- errors = error;
- error = null;
- }
- }
-
- if (errors != null) {
- throw errors;
- }
- return op;
-
- } finally {
- /*
- reset here in the finally block to make sure it really is cleared
- */
- processingCopyResults.set(false);
- }
- }
-
- /*
- * To prevent client/server protocol deadlocks, we try to manage the estimated recv buffer size
- * and force a sync +flush and process results if we think it might be getting too full.
- *
- * See the comments above MAX_BUFFERED_RECV_BYTES's declaration for details.
- */
- private void flushIfDeadlockRisk(Query query, boolean disallowBatching,
- ResultHandler resultHandler,
- BatchResultHandler batchHandler,
- final int flags) throws IOException {
- // Assume all statements need at least this much reply buffer space,
- // plus params
- estimatedReceiveBufferBytes += NODATA_QUERY_RESPONSE_SIZE_BYTES;
-
- SimpleQuery sq = (SimpleQuery) query;
- if (sq.isStatementDescribed()) {
- /*
- * Estimate the response size of the fields and add it to the expected response size.
- *
- * It's impossible for us to estimate the rowcount. We'll assume one row, as that's the common
- * case for batches and we're leaving plenty of breathing room in this approach. It's still
- * not deadlock-proof though; see pgjdbc github issues #194 and #195.
- */
- int maxResultRowSize = sq.getMaxResultRowSize();
- if (maxResultRowSize >= 0) {
- estimatedReceiveBufferBytes += maxResultRowSize;
- } else {
- LOGGER.log(Level.FINEST, "Couldn't estimate result size or result size unbounded, "
- + "disabling batching for this query.");
- disallowBatching = true;
- }
- } else {
- /*
- * We only describe a statement if we're expecting results from it, so it's legal to batch
- * unprepared statements. We'll abort later if we get any uresults from them where none are
- * expected. For now all we can do is hope the user told us the truth and assume that
- * NODATA_QUERY_RESPONSE_SIZE_BYTES is enough to cover it.
- */
- }
-
- if (disallowBatching || estimatedReceiveBufferBytes >= MAX_BUFFERED_RECV_BYTES) {
- LOGGER.log(Level.FINEST, "Forcing Sync, receive buffer full or batching disallowed");
- sendSync();
- processResults(resultHandler, flags);
- estimatedReceiveBufferBytes = 0;
- if (batchHandler != null) {
- batchHandler.secureProgress();
- }
- }
-
- }
-
- /*
- * Send a query to the backend.
- */
- private void sendQuery(Query query, V3ParameterList parameters, int maxRows, int fetchSize,
- int flags, ResultHandler resultHandler,
- BatchResultHandler batchHandler, boolean adaptiveFetch) throws IOException, SQLException {
- // Now the query itself.
- Query[] subqueries = query.getSubqueries();
- SimpleParameterList[] subparams = parameters.getSubparams();
-
- // We know this is deprecated, but still respect it in case anyone's using it.
- // PgJDBC its self no longer does.
- @SuppressWarnings("deprecation")
- boolean disallowBatching = (flags & QueryExecutor.QUERY_DISALLOW_BATCHING) != 0;
-
- if (subqueries == null) {
- flushIfDeadlockRisk(query, disallowBatching, resultHandler, batchHandler, flags);
-
- // If we saw errors, don't send anything more.
- if (resultHandler.getException() == null) {
- if (fetchSize != 0) {
- adaptiveFetchCache.addNewQuery(adaptiveFetch, query);
- }
- sendOneQuery((SimpleQuery) query, (SimpleParameterList) parameters, maxRows, fetchSize,
- flags);
- }
- } else {
- for (int i = 0; i < subqueries.length; i++) {
- final Query subquery = subqueries[i];
- flushIfDeadlockRisk(subquery, disallowBatching, resultHandler, batchHandler, flags);
-
- // If we saw errors, don't send anything more.
- if (resultHandler.getException() != null) {
- break;
- }
-
- // In the situation where parameters is already
- // NO_PARAMETERS it cannot know the correct
- // number of array elements to return in the
- // above call to getSubparams(), so it must
- // return null which we check for here.
- //
- SimpleParameterList subparam = SimpleQuery.NO_PARAMETERS;
- if (subparams != null) {
- subparam = subparams[i];
- }
- if (fetchSize != 0) {
- adaptiveFetchCache.addNewQuery(adaptiveFetch, subquery);
- }
- sendOneQuery((SimpleQuery) subquery, subparam, maxRows, fetchSize, flags);
- }
- }
- }
-
- //
- // Message sending
- //
-
- private void sendSync() throws IOException {
- LOGGER.log(Level.FINEST, " FE=> Sync");
-
- pgStream.sendChar('S'); // Sync
- pgStream.sendInteger4(4); // Length
- pgStream.flush();
- // Below "add queues" are likely not required at all
- pendingExecuteQueue.add(new ExecuteRequest(sync, null, true));
- pendingDescribePortalQueue.add(sync);
- }
-
- private void sendParse(SimpleQuery query, SimpleParameterList params, boolean oneShot)
- throws IOException {
- // Already parsed, or we have a Parse pending and the types are right?
- int[] typeOIDs = params.getTypeOIDs();
- if (query.isPreparedFor(typeOIDs, deallocateEpoch)) {
- return;
- }
-
- // Clean up any existing statement, as we can't use it.
- query.unprepare();
- processDeadParsedQueries();
-
- // Remove any cached Field values. The re-parsed query might report different
- // fields because input parameter types may result in different type inferences
- // for unspecified types.
- query.setFields(null);
-
- String statementName = null;
- if (!oneShot) {
- // Generate a statement name to use.
- statementName = "S_" + (nextUniqueID++);
-
- // And prepare the new statement.
- // NB: Must clone the OID array, as it's a direct reference to
- // the SimpleParameterList's internal array that might be modified
- // under us.
- query.setStatementName(statementName, deallocateEpoch);
- query.setPrepareTypes(typeOIDs);
- registerParsedQuery(query, statementName);
- }
-
- byte[] encodedStatementName = query.getEncodedStatementName();
- String nativeSql = query.getNativeSql();
-
- if (LOGGER.isLoggable(Level.FINEST)) {
- StringBuilder sbuf = new StringBuilder(" FE=> Parse(stmt=" + statementName + ",query=\"");
- sbuf.append(nativeSql);
- sbuf.append("\",oids={");
- for (int i = 1; i <= params.getParameterCount(); i++) {
- if (i != 1) {
- sbuf.append(",");
- }
- sbuf.append(params.getTypeOID(i));
- }
- sbuf.append("})");
- LOGGER.log(Level.FINEST, sbuf.toString());
- }
-
- //
- // Send Parse.
- //
-
- byte[] queryUtf8 = nativeSql.getBytes(StandardCharsets.UTF_8);
-
- // Total size = 4 (size field)
- // + N + 1 (statement name, zero-terminated)
- // + N + 1 (query, zero terminated)
- // + 2 (parameter count) + N * 4 (parameter types)
- int encodedSize = 4
- + (encodedStatementName == null ? 0 : encodedStatementName.length) + 1
- + queryUtf8.length + 1
- + 2 + 4 * params.getParameterCount();
-
- pgStream.sendChar('P'); // Parse
- pgStream.sendInteger4(encodedSize);
- if (encodedStatementName != null) {
- pgStream.send(encodedStatementName);
- }
- pgStream.sendChar(0); // End of statement name
- pgStream.send(queryUtf8); // Query string
- pgStream.sendChar(0); // End of query string.
- pgStream.sendInteger2(params.getParameterCount()); // # of parameter types specified
- for (int i = 1; i <= params.getParameterCount(); i++) {
- pgStream.sendInteger4(params.getTypeOID(i));
- }
-
- pendingParseQueue.add(query);
- }
-
- private void sendBind(SimpleQuery query, SimpleParameterList params, Portal portal,
- boolean noBinaryTransfer) throws IOException {
- //
- // Send Bind.
- //
-
- String statementName = query.getStatementName();
- byte[] encodedStatementName = query.getEncodedStatementName();
- byte[] encodedPortalName = portal == null ? null : portal.getEncodedPortalName();
-
- if (LOGGER.isLoggable(Level.FINEST)) {
- StringBuilder sbuf = new StringBuilder(" FE=> Bind(stmt=" + statementName + ",portal=" + portal);
- for (int i = 1; i <= params.getParameterCount(); i++) {
- sbuf.append(",$").append(i).append("=<")
- .append(params.toString(i, true))
- .append(">,type=").append(Oid.toString(params.getTypeOID(i)));
- }
- sbuf.append(")");
- LOGGER.log(Level.FINEST, sbuf.toString());
- }
-
- // Total size = 4 (size field) + N + 1 (destination portal)
- // + N + 1 (statement name)
- // + 2 (param format code count) + N * 2 (format codes)
- // + 2 (param value count) + N (encoded param value size)
- // + 2 (result format code count, 0)
- long encodedSize = 0;
- for (int i = 1; i <= params.getParameterCount(); i++) {
- if (params.isNull(i)) {
- encodedSize += 4;
- } else {
- encodedSize += (long) 4 + params.getV3Length(i);
- }
- }
-
- Field[] fields = query.getFields();
- if (!noBinaryTransfer && query.needUpdateFieldFormats() && fields != null) {
- for (Field field : fields) {
- if (useBinary(field)) {
- field.setFormat(Field.BINARY_FORMAT);
- query.setHasBinaryFields(true);
- }
- }
- }
- // If text-only results are required (e.g. updateable resultset), and the query has binary columns,
- // flip to text format.
- if (noBinaryTransfer && query.hasBinaryFields() && fields != null) {
- for (Field field : fields) {
- if (field.getFormat() != Field.TEXT_FORMAT) {
- field.setFormat(Field.TEXT_FORMAT);
- }
- }
- query.resetNeedUpdateFieldFormats();
- query.setHasBinaryFields(false);
- }
-
- // This is not the number of binary fields, but the total number
- // of fields if any of them are binary or zero if all of them
- // are text.
- int numBinaryFields = !noBinaryTransfer && query.hasBinaryFields() && fields != null
- ? fields.length : 0;
-
- encodedSize = 4
- + (encodedPortalName == null ? 0 : encodedPortalName.length) + 1
- + (encodedStatementName == null ? 0 : encodedStatementName.length) + 1
- + 2 + params.getParameterCount() * 2
- + 2 + encodedSize
- + 2 + numBinaryFields * 2;
-
- // backend's MaxAllocSize is the largest message that can
- // be received from a client. If we have a bigger value
- // from either very large parameters or incorrect length
- // descriptions of setXXXStream we do not send the bind
- // message.
- //
- if (encodedSize > 0x3fffffff) {
- throw new PGBindException(new IOException(GT.tr(
- "Bind message length {0} too long. This can be caused by very large or incorrect length specifications on InputStream parameters.",
- encodedSize)));
- }
-
- pgStream.sendChar('B'); // Bind
- pgStream.sendInteger4((int) encodedSize); // Message size
- if (encodedPortalName != null) {
- pgStream.send(encodedPortalName); // Destination portal name.
- }
- pgStream.sendChar(0); // End of portal name.
- if (encodedStatementName != null) {
- pgStream.send(encodedStatementName); // Source statement name.
- }
- pgStream.sendChar(0); // End of statement name.
-
- pgStream.sendInteger2(params.getParameterCount()); // # of parameter format codes
- for (int i = 1; i <= params.getParameterCount(); i++) {
- pgStream.sendInteger2(params.isBinary(i) ? 1 : 0); // Parameter format code
- }
-
- pgStream.sendInteger2(params.getParameterCount()); // # of parameter values
-
- // If an error occurs when reading a stream we have to
- // continue pumping out data to match the length we
- // said we would. Once we've done that we throw
- // this exception. Multiple exceptions can occur and
- // it really doesn't matter which one is reported back
- // to the caller.
- //
- PGBindException bindException = null;
-
- for (int i = 1; i <= params.getParameterCount(); i++) {
- if (params.isNull(i)) {
- pgStream.sendInteger4(-1); // Magic size of -1 means NULL
- } else {
- pgStream.sendInteger4(params.getV3Length(i)); // Parameter size
- try {
- params.writeV3Value(i, pgStream); // Parameter value
- } catch (PGBindException be) {
- bindException = be;
- }
- }
- }
-
- pgStream.sendInteger2(numBinaryFields); // # of result format codes
- for (int i = 0; fields != null && i < numBinaryFields; i++) {
- pgStream.sendInteger2(fields[i].getFormat());
- }
-
- pendingBindQueue.add(portal == null ? UNNAMED_PORTAL : portal);
-
- if (bindException != null) {
- throw bindException;
- }
- }
-
- /**
- * Returns true if the specified field should be retrieved using binary encoding.
- *
- * @param field The field whose Oid type to analyse.
- * @return True if {@link Field#BINARY_FORMAT} should be used, false if
- * {@link Field#BINARY_FORMAT}.
- */
- private boolean useBinary(Field field) {
- int oid = field.getOID();
- return useBinaryForReceive(oid);
- }
-
- private void sendDescribePortal(SimpleQuery query, Portal portal) throws IOException {
- //
- // Send Describe.
- //
-
- LOGGER.log(Level.FINEST, " FE=> Describe(portal={0})", portal);
-
- byte[] encodedPortalName = portal == null ? null : portal.getEncodedPortalName();
-
- // Total size = 4 (size field) + 1 (describe type, 'P') + N + 1 (portal name)
- int encodedSize = 4 + 1 + (encodedPortalName == null ? 0 : encodedPortalName.length) + 1;
-
- pgStream.sendChar('D'); // Describe
- pgStream.sendInteger4(encodedSize); // message size
- pgStream.sendChar('P'); // Describe (Portal)
- if (encodedPortalName != null) {
- pgStream.send(encodedPortalName); // portal name to close
- }
- pgStream.sendChar(0); // end of portal name
-
- pendingDescribePortalQueue.add(query);
- query.setPortalDescribed(true);
- }
-
- private void sendDescribeStatement(SimpleQuery query, SimpleParameterList params,
- boolean describeOnly) throws IOException {
- // Send Statement Describe
-
- LOGGER.log(Level.FINEST, " FE=> Describe(statement={0})", query.getStatementName());
-
- byte[] encodedStatementName = query.getEncodedStatementName();
-
- // Total size = 4 (size field) + 1 (describe type, 'S') + N + 1 (portal name)
- int encodedSize = 4 + 1 + (encodedStatementName == null ? 0 : encodedStatementName.length) + 1;
-
- pgStream.sendChar('D'); // Describe
- pgStream.sendInteger4(encodedSize); // Message size
- pgStream.sendChar('S'); // Describe (Statement);
- if (encodedStatementName != null) {
- pgStream.send(encodedStatementName); // Statement name
- }
- pgStream.sendChar(0); // end message
-
- // Note: statement name can change over time for the same query object
- // Thus we take a snapshot of the query name
- pendingDescribeStatementQueue.add(
- new DescribeRequest(query, params, describeOnly, query.getStatementName()));
- pendingDescribePortalQueue.add(query);
- query.setStatementDescribed(true);
- query.setPortalDescribed(true);
- }
-
- private void sendExecute(SimpleQuery query, Portal portal, int limit)
- throws IOException {
- //
- // Send Execute.
- //
- if (LOGGER.isLoggable(Level.FINEST)) {
- LOGGER.log(Level.FINEST, " FE=> Execute(portal={0},limit={1})", new Object[]{portal, limit});
- }
-
- byte[] encodedPortalName = portal == null ? null : portal.getEncodedPortalName();
- int encodedSize = encodedPortalName == null ? 0 : encodedPortalName.length;
-
- // Total size = 4 (size field) + 1 + N (source portal) + 4 (max rows)
- pgStream.sendChar('E'); // Execute
- pgStream.sendInteger4(4 + 1 + encodedSize + 4); // message size
- if (encodedPortalName != null) {
- pgStream.send(encodedPortalName); // portal name
- }
- pgStream.sendChar(0); // portal name terminator
- pgStream.sendInteger4(limit); // row limit
-
- pendingExecuteQueue.add(new ExecuteRequest(query, portal, false));
- }
-
- private void sendClosePortal(String portalName) throws IOException {
- //
- // Send Close.
- //
-
- LOGGER.log(Level.FINEST, " FE=> ClosePortal({0})", portalName);
-
- byte[] encodedPortalName = portalName == null ? null : portalName.getBytes(StandardCharsets.UTF_8);
- int encodedSize = encodedPortalName == null ? 0 : encodedPortalName.length;
-
- // Total size = 4 (size field) + 1 (close type, 'P') + 1 + N (portal name)
- pgStream.sendChar('C'); // Close
- pgStream.sendInteger4(4 + 1 + 1 + encodedSize); // message size
- pgStream.sendChar('P'); // Close (Portal)
- if (encodedPortalName != null) {
- pgStream.send(encodedPortalName);
- }
- pgStream.sendChar(0); // unnamed portal
- }
-
- private void sendCloseStatement(String statementName) throws IOException {
- //
- // Send Close.
- //
-
- LOGGER.log(Level.FINEST, " FE=> CloseStatement({0})", statementName);
-
- byte[] encodedStatementName = statementName.getBytes(StandardCharsets.UTF_8);
-
- // Total size = 4 (size field) + 1 (close type, 'S') + N + 1 (statement name)
- pgStream.sendChar('C'); // Close
- pgStream.sendInteger4(4 + 1 + encodedStatementName.length + 1); // message size
- pgStream.sendChar('S'); // Close (Statement)
- pgStream.send(encodedStatementName); // statement to close
- pgStream.sendChar(0); // statement name terminator
- }
-
- // sendOneQuery sends a single statement via the extended query protocol.
- // Per the FE/BE docs this is essentially the same as how a simple query runs
- // (except that it generates some extra acknowledgement messages, and we
- // can send several queries before doing the Sync)
- //
- // Parse S_n from "query string with parameter placeholders"; skipped if already done previously
- // or if oneshot
- // Bind C_n from S_n plus parameters (or from unnamed statement for oneshot queries)
- // Describe C_n; skipped if caller doesn't want metadata
- // Execute C_n with maxRows limit; maxRows = 1 if caller doesn't want results
- // (above repeats once per call to sendOneQuery)
- // Sync (sent by caller)
- //
- private void sendOneQuery(SimpleQuery query, SimpleParameterList params, int maxRows,
- int fetchSize, int flags) throws IOException {
- boolean asSimple = (flags & QueryExecutor.QUERY_EXECUTE_AS_SIMPLE) != 0;
- if (asSimple) {
- assert (flags & QueryExecutor.QUERY_DESCRIBE_ONLY) == 0
- : "Simple mode does not support describe requests. sql = " + query.getNativeSql()
- + ", flags = " + flags;
- sendSimpleQuery(query, params);
- return;
- }
-
- assert !query.getNativeQuery().multiStatement
- : "Queries that might contain ; must be executed with QueryExecutor.QUERY_EXECUTE_AS_SIMPLE mode. "
- + "Given query is " + query.getNativeSql();
-
- // Per https://www.postgresql.org/docs/current/static/protocol-flow.html#PROTOCOL-FLOW-EXT-QUERY
- // A Bind message can use the unnamed prepared statement to create a named portal.
- // If the Bind is successful, an Execute message can reference that named portal until either
- // the end of the current transaction
- // or the named portal is explicitly destroyed
-
- boolean noResults = (flags & QueryExecutor.QUERY_NO_RESULTS) != 0;
- boolean noMeta = (flags & QueryExecutor.QUERY_NO_METADATA) != 0;
- boolean describeOnly = (flags & QueryExecutor.QUERY_DESCRIBE_ONLY) != 0;
- // extended queries always use a portal
- // the usePortal flag controls whether or not we use a *named* portal
- boolean usePortal = (flags & QueryExecutor.QUERY_FORWARD_CURSOR) != 0 && !noResults && !noMeta
- && fetchSize > 0 && !describeOnly;
- boolean oneShot = (flags & QueryExecutor.QUERY_ONESHOT) != 0;
- boolean noBinaryTransfer = (flags & QUERY_NO_BINARY_TRANSFER) != 0;
- boolean forceDescribePortal = (flags & QUERY_FORCE_DESCRIBE_PORTAL) != 0;
-
- // Work out how many rows to fetch in this pass.
-
- int rows;
- if (noResults) {
- rows = 1; // We're discarding any results anyway, so limit data transfer to a minimum
- } else if (!usePortal) {
- rows = maxRows; // Not using a portal -- fetchSize is irrelevant
- } else if (maxRows != 0 && fetchSize > maxRows) {
- // fetchSize > maxRows, use maxRows (nb: fetchSize cannot be 0 if usePortal == true)
- rows = maxRows;
- } else {
- rows = fetchSize; // maxRows > fetchSize
- }
-
- sendParse(query, params, oneShot);
-
- // Must do this after sendParse to pick up any changes to the
- // query's state.
- //
- boolean queryHasUnknown = query.hasUnresolvedTypes();
- boolean paramsHasUnknown = params.hasUnresolvedTypes();
-
- boolean describeStatement = describeOnly
- || (!oneShot && paramsHasUnknown && queryHasUnknown && !query.isStatementDescribed());
-
- if (!describeStatement && paramsHasUnknown && !queryHasUnknown) {
- int[] queryOIDs = query.getPrepareTypes();
- int[] paramOIDs = params.getTypeOIDs();
- for (int i = 0; i < paramOIDs.length; i++) {
- // Only supply type information when there isn't any
- // already, don't arbitrarily overwrite user supplied
- // type information.
- if (paramOIDs[i] == Oid.UNSPECIFIED) {
- params.setResolvedType(i + 1, queryOIDs[i]);
- }
- }
- }
-
- if (describeStatement) {
- sendDescribeStatement(query, params, describeOnly);
- if (describeOnly) {
- return;
- }
- }
-
- // Construct a new portal if needed.
- Portal portal = null;
- if (usePortal) {
- String portalName = "C_" + (nextUniqueID++);
- portal = new Portal(query, portalName);
- }
-
- sendBind(query, params, portal, noBinaryTransfer);
-
- // A statement describe will also output a RowDescription,
- // so don't reissue it here if we've already done so.
- //
- if (!noMeta && !describeStatement) {
- /*
- * don't send describe if we already have cached the row description from previous executions
- *
- * XXX Clearing the fields / unpreparing the query (in sendParse) is incorrect, see bug #267.
- * We might clear the cached fields in a later execution of this query if the bind parameter
- * types change, but we're assuming here that they'll still be valid when we come to process
- * the results of this query, so we don't send a new describe here. We re-describe after the
- * fields are cleared, but the result of that gets processed after processing the results from
- * earlier executions that we didn't describe because we didn't think we had to.
- *
- * To work around this, force a Describe at each execution in batches where this can be a
- * problem. It won't cause more round trips so the performance impact is low, and it'll ensure
- * that the field information available when we decoded the results. This is undeniably a
- * hack, but there aren't many good alternatives.
- */
- if (!query.isPortalDescribed() || forceDescribePortal) {
- sendDescribePortal(query, portal);
- }
- }
-
- sendExecute(query, portal, rows);
- }
-
- private void sendSimpleQuery(SimpleQuery query, SimpleParameterList params) throws IOException {
- String nativeSql = query.toString(params);
-
- LOGGER.log(Level.FINEST, " FE=> SimpleQuery(query=\"{0}\")", nativeSql);
- Encoding encoding = pgStream.getEncoding();
-
- byte[] encoded = encoding.encode(nativeSql);
- pgStream.sendChar('Q');
- pgStream.sendInteger4(encoded.length + 4 + 1);
- pgStream.send(encoded);
- pgStream.sendChar(0);
- pgStream.flush();
- pendingExecuteQueue.add(new ExecuteRequest(query, null, true));
- pendingDescribePortalQueue.add(query);
- }
-
- //
- // Garbage collection of parsed statements.
- //
- // When a statement is successfully parsed, registerParsedQuery is called.
- // This creates a PhantomReference referring to the "owner" of the statement
- // (the originating Query object) and inserts that reference as a key in
- // parsedQueryMap. The values of parsedQueryMap are the corresponding allocated
- // statement names. The originating Query object also holds a reference to the
- // PhantomReference.
- //
- // When the owning Query object is closed, it enqueues and clears the associated
- // PhantomReference.
- //
- // If the owning Query object becomes unreachable (see java.lang.ref javadoc) before
- // being closed, the corresponding PhantomReference is enqueued on
- // parsedQueryCleanupQueue. In the Sun JVM, phantom references are only enqueued
- // when a GC occurs, so this is not necessarily prompt but should eventually happen.
- //
- // Periodically (currently, just before query execution), the parsedQueryCleanupQueue
- // is polled. For each enqueued PhantomReference we find, we remove the corresponding
- // entry from parsedQueryMap, obtaining the name of the underlying statement in the
- // process. Then we send a message to the backend to deallocate that statement.
- //
-
- private final HashMap, String> parsedQueryMap =
- new HashMap<>();
- private final ReferenceQueue parsedQueryCleanupQueue =
- new ReferenceQueue<>();
-
- private void registerParsedQuery(SimpleQuery query, String statementName) {
- if (statementName == null) {
- return;
- }
-
- PhantomReference cleanupRef =
- new PhantomReference<>(query, parsedQueryCleanupQueue);
- parsedQueryMap.put(cleanupRef, statementName);
- query.setCleanupRef(cleanupRef);
- }
-
- private void processDeadParsedQueries() throws IOException {
- Reference extends SimpleQuery> deadQuery;
- while ((deadQuery = parsedQueryCleanupQueue.poll()) != null) {
- String statementName = parsedQueryMap.remove(deadQuery);
- sendCloseStatement(statementName);
- deadQuery.clear();
- }
- }
-
- //
- // Essentially the same strategy is used for the cleanup of portals.
- // Note that each Portal holds a reference to the corresponding Query
- // that generated it, so the Query won't be collected (and the statement
- // closed) until all the Portals are, too. This is required by the mechanics
- // of the backend protocol: when a statement is closed, all dependent portals
- // are also closed.
- //
-
- private final HashMap, String> openPortalMap =
- new HashMap<>();
- private final ReferenceQueue openPortalCleanupQueue = new ReferenceQueue<>();
-
- private static final Portal UNNAMED_PORTAL = new Portal(null, "unnamed");
-
- private void registerOpenPortal(Portal portal) {
- if (portal == UNNAMED_PORTAL) {
- return; // Using the unnamed portal.
- }
-
- String portalName = portal.getPortalName();
- PhantomReference cleanupRef =
- new PhantomReference<>(portal, openPortalCleanupQueue);
- openPortalMap.put(cleanupRef, portalName);
- portal.setCleanupRef(cleanupRef);
- }
-
- private void processDeadPortals() throws IOException {
- Reference extends Portal> deadPortal;
- while ((deadPortal = openPortalCleanupQueue.poll()) != null) {
- String portalName = openPortalMap.remove(deadPortal);
- sendClosePortal(portalName);
- deadPortal.clear();
- }
- }
-
- protected void processResults(ResultHandler handler, int flags) throws IOException {
- processResults(handler, flags, false);
- }
-
- protected void processResults(ResultHandler handler, int flags, boolean adaptiveFetch)
- throws IOException {
- boolean noResults = (flags & QueryExecutor.QUERY_NO_RESULTS) != 0;
- boolean bothRowsAndStatus = (flags & QueryExecutor.QUERY_BOTH_ROWS_AND_STATUS) != 0;
-
- List tuples = null;
-
- int c;
- boolean endQuery = false;
-
- // At the end of a command execution we have the CommandComplete
- // message to tell us we're done, but with a describeOnly command
- // we have no real flag to let us know we're done. We've got to
- // look for the next RowDescription or NoData message and return
- // from there.
- boolean doneAfterRowDescNoData = false;
-
- while (!endQuery) {
- c = pgStream.receiveChar();
- switch (c) {
- case 'A': // Asynchronous Notify
- receiveAsyncNotify();
- break;
-
- case '1': // Parse Complete (response to Parse)
- pgStream.receiveInteger4(); // len, discarded
-
- SimpleQuery parsedQuery = pendingParseQueue.removeFirst();
- String parsedStatementName = parsedQuery.getStatementName();
-
- LOGGER.log(Level.FINEST, " <=BE ParseComplete [{0}]", parsedStatementName);
-
- break;
-
- case 't': { // ParameterDescription
- pgStream.receiveInteger4(); // len, discarded
-
- LOGGER.log(Level.FINEST, " <=BE ParameterDescription");
-
- DescribeRequest describeData = pendingDescribeStatementQueue.getFirst();
- SimpleQuery query = describeData.query;
- SimpleParameterList params = describeData.parameterList;
- boolean describeOnly = describeData.describeOnly;
- // This might differ from query.getStatementName if the query was re-prepared
- String origStatementName = describeData.statementName;
-
- int numParams = pgStream.receiveInteger2();
-
- for (int i = 1; i <= numParams; i++) {
- int typeOid = pgStream.receiveInteger4();
- params.setResolvedType(i, typeOid);
- }
-
- // Since we can issue multiple Parse and DescribeStatement
- // messages in a single network trip, we need to make
- // sure the describe results we requested are still
- // applicable to the latest parsed query.
- //
- if ((origStatementName == null && query.getStatementName() == null)
- || (origStatementName != null
- && origStatementName.equals(query.getStatementName()))) {
- query.setPrepareTypes(params.getTypeOIDs());
- }
-
- if (describeOnly) {
- doneAfterRowDescNoData = true;
- } else {
- pendingDescribeStatementQueue.removeFirst();
- }
- break;
- }
-
- case '2': // Bind Complete (response to Bind)
- pgStream.receiveInteger4(); // len, discarded
-
- Portal boundPortal = pendingBindQueue.removeFirst();
- LOGGER.log(Level.FINEST, " <=BE BindComplete [{0}]", boundPortal);
-
- registerOpenPortal(boundPortal);
- break;
-
- case '3': // Close Complete (response to Close)
- pgStream.receiveInteger4(); // len, discarded
- LOGGER.log(Level.FINEST, " <=BE CloseComplete");
- break;
-
- case 'n': // No Data (response to Describe)
- pgStream.receiveInteger4(); // len, discarded
- LOGGER.log(Level.FINEST, " <=BE NoData");
-
- pendingDescribePortalQueue.removeFirst();
-
- if (doneAfterRowDescNoData) {
- DescribeRequest describeData = pendingDescribeStatementQueue.removeFirst();
- SimpleQuery currentQuery = describeData.query;
-
- Field[] fields = currentQuery.getFields();
-
- if (fields != null) { // There was a resultset.
- tuples = new ArrayList<>();
- handler.handleResultRows(currentQuery, fields, tuples, null);
- tuples = null;
- }
- }
- break;
-
- case 's': { // Portal Suspended (end of Execute)
- // nb: this appears *instead* of CommandStatus.
- // Must be a SELECT if we suspended, so don't worry about it.
-
- pgStream.receiveInteger4(); // len, discarded
- LOGGER.log(Level.FINEST, " <=BE PortalSuspended");
-
- ExecuteRequest executeData = pendingExecuteQueue.removeFirst();
- SimpleQuery currentQuery = executeData.query;
- Portal currentPortal = executeData.portal;
-
- if (currentPortal != null) {
- // Existence of portal defines if query was using fetching.
- adaptiveFetchCache
- .updateQueryFetchSize(adaptiveFetch, currentQuery, pgStream.getMaxRowSizeBytes());
- }
- pgStream.clearMaxRowSizeBytes();
-
- Field[] fields = currentQuery.getFields();
- if (fields != null && tuples == null) {
- // When no results expected, pretend an empty resultset was returned
- // Not sure if new ArrayList can be always replaced with emptyList
- tuples = noResults ? Collections.emptyList() : new ArrayList();
- }
-
- if (fields != null && tuples != null) {
- handler.handleResultRows(currentQuery, fields, tuples, currentPortal);
- }
- tuples = null;
- break;
- }
-
- case 'C': { // Command Status (end of Execute)
- // Handle status.
- String status = receiveCommandStatus();
- if (isFlushCacheOnDeallocate()
- && (status.startsWith("DEALLOCATE ALL") || status.startsWith("DISCARD ALL"))) {
- deallocateEpoch++;
- }
-
- doneAfterRowDescNoData = false;
-
- ExecuteRequest executeData = pendingExecuteQueue.peekFirst();
- SimpleQuery currentQuery = executeData.query;
- Portal currentPortal = executeData.portal;
-
- if (currentPortal != null) {
- // Existence of portal defines if query was using fetching.
-
- // Command executed, adaptive fetch size can be removed for this query, max row size can be cleared
- adaptiveFetchCache.removeQuery(adaptiveFetch, currentQuery);
- // Update to change fetch size for other fetch portals of this query
- adaptiveFetchCache
- .updateQueryFetchSize(adaptiveFetch, currentQuery, pgStream.getMaxRowSizeBytes());
- }
- pgStream.clearMaxRowSizeBytes();
-
- if (status.startsWith("SET")) {
- String nativeSql = currentQuery.getNativeQuery().nativeSql;
- // Scan only the first 1024 characters to
- // avoid big overhead for long queries.
- if (nativeSql.lastIndexOf("search_path", 1024) != -1
- && !nativeSql.equals(lastSetSearchPathQuery)) {
- // Search path was changed, invalidate prepared statement cache
- lastSetSearchPathQuery = nativeSql;
- deallocateEpoch++;
- }
- }
-
- if (!executeData.asSimple) {
- pendingExecuteQueue.removeFirst();
- } else {
- // For simple 'Q' queries, executeQueue is cleared via ReadyForQuery message
- }
-
- // we want to make sure we do not add any results from these queries to the result set
- if (currentQuery == autoSaveQuery
- || currentQuery == releaseAutoSave) {
- // ignore "SAVEPOINT" or RELEASE SAVEPOINT status from autosave query
- break;
- }
-
- Field[] fields = currentQuery.getFields();
- if (fields != null && tuples == null) {
- // When no results expected, pretend an empty resultset was returned
- // Not sure if new ArrayList can be always replaced with emptyList
- tuples = noResults ? Collections.emptyList() : new ArrayList();
- }
-
- // If we received tuples we must know the structure of the
- // resultset, otherwise we won't be able to fetch columns
- // from it, etc, later.
- if (fields == null && tuples != null) {
- throw new IllegalStateException(
- "Received resultset tuples, but no field structure for them");
- }
-
- if (fields != null && tuples != null) {
- // There was a resultset.
- handler.handleResultRows(currentQuery, fields, tuples, null);
- tuples = null;
-
- if (bothRowsAndStatus) {
- interpretCommandStatus(status, handler);
- }
- } else {
- interpretCommandStatus(status, handler);
- }
-
- if (executeData.asSimple) {
- // Simple queries might return several resultsets, thus we clear
- // fields, so queries like "select 1;update; select2" will properly
- // identify that "update" did not return any results
- currentQuery.setFields(null);
- }
-
- if (currentPortal != null) {
- currentPortal.close();
- }
- break;
- }
-
- case 'D': // Data Transfer (ongoing Execute response)
- Tuple tuple = null;
- try {
- tuple = pgStream.receiveTupleV3();
- } catch (OutOfMemoryError oome) {
- if (!noResults) {
- handler.handleError(
- new PSQLException(GT.tr("Ran out of memory retrieving query results."),
- PSQLState.OUT_OF_MEMORY, oome));
- }
- } catch (SQLException e) {
- handler.handleError(e);
- }
- if (!noResults) {
- if (tuples == null) {
- tuples = new ArrayList<>();
- }
- if (tuple != null) {
- tuples.add(tuple);
- }
- }
-
- if (LOGGER.isLoggable(Level.FINEST)) {
- int length;
- if (tuple == null) {
- length = -1;
- } else {
- length = tuple.length();
- }
- LOGGER.log(Level.FINEST, " <=BE DataRow(len={0})", length);
- }
-
- break;
-
- case 'E':
- // Error Response (response to pretty much everything; backend then skips until Sync)
- SQLException error = receiveErrorResponse();
- handler.handleError(error);
- if (willHealViaReparse(error)) {
- // prepared statement ... is not valid kind of error
- // Technically speaking, the error is unexpected, thus we invalidate other
- // server-prepared statements just in case.
- deallocateEpoch++;
+ waitOnLock();
if (LOGGER.isLoggable(Level.FINEST)) {
- LOGGER.log(Level.FINEST, " FE: received {0}, will invalidate statements. deallocateEpoch is now {1}",
- new Object[]{error.getSQLState(), deallocateEpoch});
+ LOGGER.log(Level.FINEST, " batch execute {0} queries, handler={1}, maxRows={2}, fetchSize={3}, flags={4}",
+ new Object[]{queries.length, batchHandler, maxRows, fetchSize, flags});
}
- }
- // keep processing
- break;
- case 'I': { // Empty Query (end of Execute)
- pgStream.receiveInteger4();
+ flags = updateQueryMode(flags);
- LOGGER.log(Level.FINEST, " <=BE EmptyQuery");
+ boolean describeOnly = (QUERY_DESCRIBE_ONLY & flags) != 0;
+ // Check parameters and resolve OIDs.
+ if (!describeOnly) {
+ for (ParameterList parameterList : parameterLists) {
+ if (parameterList != null) {
+ ((V3ParameterList) parameterList).checkAllParametersSet();
+ }
+ }
+ }
- ExecuteRequest executeData = pendingExecuteQueue.removeFirst();
- Portal currentPortal = executeData.portal;
- handler.handleCommandStatus("EMPTY", 0, 0);
- if (currentPortal != null) {
- currentPortal.close();
- }
- break;
+ boolean autosave = false;
+ ResultHandler handler = batchHandler;
+ try {
+ handler = sendQueryPreamble(batchHandler, flags);
+ autosave = sendAutomaticSavepoint(queries[0], flags);
+ estimatedReceiveBufferBytes = 0;
+
+ for (int i = 0; i < queries.length; i++) {
+ Query query = queries[i];
+ V3ParameterList parameters = (V3ParameterList) parameterLists[i];
+ if (parameters == null) {
+ parameters = SimpleQuery.NO_PARAMETERS;
+ }
+
+ sendQuery(query, parameters, maxRows, fetchSize, flags, handler, batchHandler, adaptiveFetch);
+
+ if (handler.getException() != null) {
+ break;
+ }
+ }
+
+ if (handler.getException() == null) {
+ if ((flags & QueryExecutor.QUERY_EXECUTE_AS_SIMPLE) != 0) {
+ // Sync message is not required for 'Q' execution as 'Q' ends with ReadyForQuery message
+ // on its own
+ } else {
+ sendSync();
+ }
+ processResults(handler, flags, adaptiveFetch);
+ estimatedReceiveBufferBytes = 0;
+ }
+ } catch (IOException e) {
+ abort();
+ handler.handleError(
+ new PSQLException(GT.tr("An I/O error occurred while sending to the backend."),
+ PSQLState.CONNECTION_FAILURE, e));
+ }
+
+ try {
+ handler.handleCompletion();
+ if (cleanupSavePoints) {
+ releaseSavePoint(autosave, flags);
+ }
+ } catch (SQLException e) {
+ rollbackIfRequired(autosave, e);
+ }
}
-
- case 'N': // Notice Response
- SQLWarning warning = receiveNoticeResponse();
- handler.handleWarning(warning);
- break;
-
- case 'S': // Parameter Status
- try {
- receiveParameterStatus();
- } catch (SQLException e) {
- handler.handleError(e);
- endQuery = true;
- }
- break;
-
- case 'T': // Row Description (response to Describe)
- Field[] fields = receiveFields();
- tuples = new ArrayList<>();
-
- SimpleQuery query = pendingDescribePortalQueue.peekFirst();
- if (!pendingExecuteQueue.isEmpty()
- && !pendingExecuteQueue.peekFirst().asSimple) {
- pendingDescribePortalQueue.removeFirst();
- }
- query.setFields(fields);
-
- if (doneAfterRowDescNoData) {
- DescribeRequest describeData = pendingDescribeStatementQueue.removeFirst();
- SimpleQuery currentQuery = describeData.query;
- currentQuery.setFields(fields);
-
- handler.handleResultRows(currentQuery, fields, tuples, null);
- tuples = null;
- }
- break;
-
- case 'Z': // Ready For Query (eventual response to Sync)
- receiveRFQ();
- if (!pendingExecuteQueue.isEmpty()
- && pendingExecuteQueue.peekFirst().asSimple) {
- tuples = null;
- pgStream.clearResultBufferCount();
-
- ExecuteRequest executeRequest = pendingExecuteQueue.removeFirst();
- // Simple queries might return several resultsets, thus we clear
- // fields, so queries like "select 1;update; select2" will properly
- // identify that "update" did not return any results
- executeRequest.query.setFields(null);
-
- pendingDescribePortalQueue.removeFirst();
- if (!pendingExecuteQueue.isEmpty()) {
- if (getTransactionState() == TransactionState.IDLE) {
- handler.secureProgress();
- }
- // process subsequent results (e.g. for cases like batched execution of simple 'Q' queries)
- break;
- }
- }
- endQuery = true;
-
- // Reset the statement name of Parses that failed.
- while (!pendingParseQueue.isEmpty()) {
- SimpleQuery failedQuery = pendingParseQueue.removeFirst();
- failedQuery.unprepare();
- }
-
- pendingParseQueue.clear(); // No more ParseComplete messages expected.
- // Pending "describe" requests might be there in case of error
- // If that is the case, reset "described" status, so the statement is properly
- // described on next execution
- while (!pendingDescribeStatementQueue.isEmpty()) {
- DescribeRequest request = pendingDescribeStatementQueue.removeFirst();
- LOGGER.log(Level.FINEST, " FE marking setStatementDescribed(false) for query {0}", request.query);
- request.query.setStatementDescribed(false);
- }
- while (!pendingDescribePortalQueue.isEmpty()) {
- SimpleQuery describePortalQuery = pendingDescribePortalQueue.removeFirst();
- LOGGER.log(Level.FINEST, " FE marking setPortalDescribed(false) for query {0}", describePortalQuery);
- describePortalQuery.setPortalDescribed(false);
- }
- pendingBindQueue.clear(); // No more BindComplete messages expected.
- pendingExecuteQueue.clear(); // No more query executions expected.
- break;
-
- case 'G': // CopyInResponse
- LOGGER.log(Level.FINEST, " <=BE CopyInResponse");
- LOGGER.log(Level.FINEST, " FE=> CopyFail");
-
- // COPY sub-protocol is not implemented yet
- // We'll send a CopyFail message for COPY FROM STDIN so that
- // server does not wait for the data.
-
- byte[] buf = "COPY commands are only supported using the CopyManager API.".getBytes(StandardCharsets.US_ASCII);
- pgStream.sendChar('f');
- pgStream.sendInteger4(buf.length + 4 + 1);
- pgStream.send(buf);
- pgStream.sendChar(0);
- pgStream.flush();
- sendSync(); // send sync message
- skipMessage(); // skip the response message
- break;
-
- case 'H': // CopyOutResponse
- LOGGER.log(Level.FINEST, " <=BE CopyOutResponse");
-
- skipMessage();
- // In case of CopyOutResponse, we cannot abort data transfer,
- // so just throw an error and ignore CopyData messages
- handler.handleError(
- new PSQLException(GT.tr("COPY commands are only supported using the CopyManager API."),
- PSQLState.NOT_IMPLEMENTED));
- break;
-
- case 'c': // CopyDone
- skipMessage();
- LOGGER.log(Level.FINEST, " <=BE CopyDone");
- break;
-
- case 'd': // CopyData
- skipMessage();
- LOGGER.log(Level.FINEST, " <=BE CopyData");
- break;
-
- default:
- throw new IOException("Unexpected packet type: " + c);
- }
-
}
- }
- /**
- * Ignore the response message by reading the message length and skipping over those bytes in the
- * communication stream.
- */
- private void skipMessage() throws IOException {
- int len = pgStream.receiveInteger4();
+ //
+ // Message sending
+ //
- assert len >= 4 : "Length from skip message must be at least 4 ";
-
- // skip len-4 (length includes the 4 bytes for message length itself
- pgStream.skip(len - 4);
- }
-
- @Override
- public void fetch(ResultCursor cursor, ResultHandler handler, int fetchSize,
- boolean adaptiveFetch) throws SQLException {
- try (ResourceLock ignore = lock.obtain()) {
- waitOnLock();
- final Portal portal = (Portal) cursor;
-
- // Insert a ResultHandler that turns bare command statuses into empty datasets
- // (if the fetch returns no rows, we see just a CommandStatus..)
- final ResultHandler delegateHandler = handler;
- final SimpleQuery query = portal.getQuery();
- handler = new ResultHandlerDelegate(delegateHandler) {
- @Override
- public void handleCommandStatus(String status, long updateCount, long insertOID) {
- handleResultRows(query, NO_FIELDS, new ArrayList<>(), null);
- }
- };
-
- // Now actually run it.
-
- try {
+ private ResultHandler sendQueryPreamble(final ResultHandler delegateHandler, int flags)
+ throws IOException {
+ // First, send CloseStatements for finalized SimpleQueries that had statement names assigned.
processDeadParsedQueries();
processDeadPortals();
- sendExecute(query, portal, fetchSize);
- sendSync();
-
- processResults(handler, 0, adaptiveFetch);
- estimatedReceiveBufferBytes = 0;
- } catch (IOException e) {
- abort();
- handler.handleError(
- new PSQLException(GT.tr("An I/O error occurred while sending to the backend."),
- PSQLState.CONNECTION_FAILURE, e));
- }
-
- handler.handleCompletion();
- }
- }
-
- @Override
- public int getAdaptiveFetchSize(boolean adaptiveFetch, ResultCursor cursor) {
- if (cursor instanceof Portal) {
- Query query = ((Portal) cursor).getQuery();
- if (Objects.nonNull(query)) {
- return adaptiveFetchCache
- .getFetchSizeForQuery(adaptiveFetch, query);
- }
- }
- return -1;
- }
-
- @Override
- public void setAdaptiveFetch(boolean adaptiveFetch) {
- this.adaptiveFetchCache.setAdaptiveFetch(adaptiveFetch);
- }
-
- @Override
- public boolean getAdaptiveFetch() {
- return this.adaptiveFetchCache.getAdaptiveFetch();
- }
-
- @Override
- public void addQueryToAdaptiveFetchCache(boolean adaptiveFetch, ResultCursor cursor) {
- if (cursor instanceof Portal) {
- Query query = ((Portal) cursor).getQuery();
- if (Objects.nonNull(query)) {
- adaptiveFetchCache.addNewQuery(adaptiveFetch, query);
- }
- }
- }
-
- @Override
- public void removeQueryFromAdaptiveFetchCache(boolean adaptiveFetch, ResultCursor cursor) {
- if (cursor instanceof Portal) {
- Query query = ((Portal) cursor).getQuery();
- if (Objects.nonNull(query)) {
- adaptiveFetchCache.removeQuery(adaptiveFetch, query);
- }
- }
- }
-
- /*
- * Receive the field descriptions from the back end.
- */
- private Field[] receiveFields() throws IOException {
- pgStream.receiveInteger4(); // MESSAGE SIZE
- int size = pgStream.receiveInteger2();
- Field[] fields = new Field[size];
-
- if (LOGGER.isLoggable(Level.FINEST)) {
- LOGGER.log(Level.FINEST, " <=BE RowDescription({0})", size);
- }
-
- for (int i = 0; i < fields.length; i++) {
- String columnLabel = pgStream.receiveCanonicalString();
- int tableOid = pgStream.receiveInteger4();
- short positionInTable = (short) pgStream.receiveInteger2();
- int typeOid = pgStream.receiveInteger4();
- int typeLength = pgStream.receiveInteger2();
- int typeModifier = pgStream.receiveInteger4();
- int formatType = pgStream.receiveInteger2();
- fields[i] = new Field(columnLabel,
- typeOid, typeLength, typeModifier, tableOid, positionInTable);
- fields[i].setFormat(formatType);
-
- LOGGER.log(Level.FINEST, " {0}", fields[i]);
- }
-
- return fields;
- }
-
- private void receiveAsyncNotify() throws IOException {
- int len = pgStream.receiveInteger4(); // MESSAGE SIZE
- assert len > 4 : "Length for AsyncNotify must be at least 4";
-
- int pid = pgStream.receiveInteger4();
- String msg = pgStream.receiveCanonicalString();
- String param = pgStream.receiveString();
- addNotification(new Notification(msg, pid, param));
-
- if (LOGGER.isLoggable(Level.FINEST)) {
- LOGGER.log(Level.FINEST, " <=BE AsyncNotify({0},{1},{2})", new Object[]{pid, msg, param});
- }
- }
-
- private SQLException receiveErrorResponse() throws IOException {
- // it's possible to get more than one error message for a query
- // see libpq comments wrt backend closing a connection
- // so, append messages to a string buffer and keep processing
- // check at the bottom to see if we need to throw an exception
-
- int elen = pgStream.receiveInteger4();
- assert elen > 4 : "Error response length must be greater than 4";
-
- EncodingPredictor.DecodeResult totalMessage = pgStream.receiveErrorString(elen - 4);
- ServerErrorMessage errorMsg = new ServerErrorMessage(totalMessage);
-
- if (LOGGER.isLoggable(Level.FINEST)) {
- LOGGER.log(Level.FINEST, " <=BE ErrorMessage({0})", errorMsg.toString());
- }
-
- PSQLException error = new PSQLException(errorMsg, this.logServerErrorDetail);
- if (transactionFailCause == null) {
- transactionFailCause = error;
- } else {
- error.initCause(transactionFailCause);
- }
- return error;
- }
-
- private SQLWarning receiveNoticeResponse() throws IOException {
- int nlen = pgStream.receiveInteger4();
- assert nlen > 4 : "Notice Response length must be greater than 4";
-
- ServerErrorMessage warnMsg = new ServerErrorMessage(pgStream.receiveString(nlen - 4));
-
- if (LOGGER.isLoggable(Level.FINEST)) {
- LOGGER.log(Level.FINEST, " <=BE NoticeResponse({0})", warnMsg.toString());
- }
-
- return new PSQLWarning(warnMsg);
- }
-
- private String receiveCommandStatus() throws IOException {
- // TODO: better handle the msg len
- int len = pgStream.receiveInteger4();
- // read len -5 bytes (-4 for len and -1 for trailing \0)
- String status = pgStream.receiveString(len - 5);
- // now read and discard the trailing \0
- pgStream.receiveChar(); // Receive(1) would allocate new byte[1], so avoid it
-
- LOGGER.log(Level.FINEST, " <=BE CommandStatus({0})", status);
-
- return status;
- }
-
- private void interpretCommandStatus(String status, ResultHandler handler) {
- try {
- commandCompleteParser.parse(status);
- } catch (SQLException e) {
- handler.handleError(e);
- return;
- }
- long oid = commandCompleteParser.getOid();
- long count = commandCompleteParser.getRows();
-
- handler.handleCommandStatus(status, count, oid);
- }
-
- private void receiveRFQ() throws IOException {
- if (pgStream.receiveInteger4() != 5) {
- throw new IOException("unexpected length of ReadyForQuery message");
- }
-
- char tStatus = (char) pgStream.receiveChar();
- if (LOGGER.isLoggable(Level.FINEST)) {
- LOGGER.log(Level.FINEST, " <=BE ReadyForQuery({0})", tStatus);
- }
-
- // Update connection state.
- switch (tStatus) {
- case 'I':
- transactionFailCause = null;
- setTransactionState(TransactionState.IDLE);
- break;
- case 'T':
- transactionFailCause = null;
- setTransactionState(TransactionState.OPEN);
- break;
- case 'E':
- setTransactionState(TransactionState.FAILED);
- break;
- default:
- throw new IOException(
- "unexpected transaction state in ReadyForQuery message: " + (int) tStatus);
- }
- }
-
- @Override
- @SuppressWarnings("deprecation")
- protected void sendCloseMessage() throws IOException {
- closeAction.sendCloseMessage(pgStream);
- }
-
- public void readStartupMessages() throws IOException, SQLException {
- for (int i = 0; i < 1000; i++) {
- int beresp = pgStream.receiveChar();
- switch (beresp) {
- case 'Z':
- receiveRFQ();
- // Ready For Query; we're done.
- return;
-
- case 'K':
- // BackendKeyData
- int msgLen = pgStream.receiveInteger4();
- if (msgLen != 12) {
- throw new PSQLException(GT.tr("Protocol error. Session setup failed."),
- PSQLState.PROTOCOL_VIOLATION);
- }
-
- int pid = pgStream.receiveInteger4();
- int ckey = pgStream.receiveInteger4();
-
- if (LOGGER.isLoggable(Level.FINEST)) {
- LOGGER.log(Level.FINEST, " <=BE BackendKeyData(pid={0},ckey={1})", new Object[]{pid, ckey});
- }
-
- setBackendKeyData(pid, ckey);
- break;
-
- case 'E':
- // Error
- throw receiveErrorResponse();
-
- case 'N':
- // Warning
- addWarning(receiveNoticeResponse());
- break;
-
- case 'S':
- // ParameterStatus
- receiveParameterStatus();
-
- break;
-
- default:
- if (LOGGER.isLoggable(Level.FINEST)) {
- LOGGER.log(Level.FINEST, " invalid message type={0}", (char) beresp);
- }
- throw new PSQLException(GT.tr("Protocol error. Session setup failed."),
- PSQLState.PROTOCOL_VIOLATION);
- }
- }
- throw new PSQLException(GT.tr("Protocol error. Session setup failed."),
- PSQLState.PROTOCOL_VIOLATION);
- }
-
- public void receiveParameterStatus() throws IOException, SQLException {
- // ParameterStatus
- pgStream.receiveInteger4(); // MESSAGE SIZE
- final String name = pgStream.receiveCanonicalStringIfPresent();
- final String value = pgStream.receiveCanonicalStringIfPresent();
-
- if (LOGGER.isLoggable(Level.FINEST)) {
- LOGGER.log(Level.FINEST, " <=BE ParameterStatus({0} = {1})", new Object[]{name, value});
- }
-
- // if the name is empty, there is nothing to do
- if (name.isEmpty()) {
- return;
- }
-
- // Update client-visible parameter status map for getParameterStatuses()
- onParameterStatus(name, value);
-
- if ("client_encoding".equals(name)) {
- if (allowEncodingChanges) {
- if (!"UTF8".equalsIgnoreCase(value) && !"UTF-8".equalsIgnoreCase(value)) {
- LOGGER.log(Level.FINE,
- "pgjdbc expects client_encoding to be UTF8 for proper operation. Actual encoding is {0}",
- value);
+ // Send BEGIN on first statement in transaction.
+ if ((flags & QueryExecutor.QUERY_SUPPRESS_BEGIN) != 0
+ || getTransactionState() != TransactionState.IDLE) {
+ return delegateHandler;
}
- pgStream.setEncoding(Encoding.getDatabaseEncoding(value));
- } else if (!"UTF8".equalsIgnoreCase(value) && !"UTF-8".equalsIgnoreCase(value)) {
- close(); // we're screwed now; we can't trust any subsequent string.
- throw new PSQLException(GT.tr(
- "The server''s client_encoding parameter was changed to {0}. The JDBC driver requires client_encoding to be UTF8 for correct operation.",
- value), PSQLState.CONNECTION_FAILURE);
- }
+ int beginFlags = QueryExecutor.QUERY_NO_METADATA;
+ if ((flags & QueryExecutor.QUERY_ONESHOT) != 0) {
+ beginFlags |= QueryExecutor.QUERY_ONESHOT;
+ }
+
+ beginFlags |= QueryExecutor.QUERY_EXECUTE_AS_SIMPLE;
+
+ beginFlags = updateQueryMode(beginFlags);
+
+ final SimpleQuery beginQuery = (flags & QueryExecutor.QUERY_READ_ONLY_HINT) == 0 ? beginTransactionQuery : beginReadOnlyTransactionQuery;
+
+ sendOneQuery(beginQuery, SimpleQuery.NO_PARAMETERS, 0, 0, beginFlags);
+
+ // Insert a handler that intercepts the BEGIN.
+ return new ResultHandlerDelegate(delegateHandler) {
+ private boolean sawBegin = false;
+
+ @Override
+ public void handleResultRows(Query fromQuery, Field[] fields, List tuples,
+ ResultCursor cursor) {
+ if (sawBegin) {
+ super.handleResultRows(fromQuery, fields, tuples, cursor);
+ }
+ }
+
+ @Override
+ public void handleCommandStatus(String status, long updateCount, long insertOID) {
+ if (!sawBegin) {
+ sawBegin = true;
+ if (!"BEGIN".equals(status)) {
+ handleError(new PSQLException(GT.tr("Expected command status BEGIN, got {0}.", status),
+ PSQLState.PROTOCOL_VIOLATION));
+ }
+ } else {
+ super.handleCommandStatus(status, updateCount, insertOID);
+ }
+ }
+ };
}
- if ("DateStyle".equals(name) && !value.startsWith("ISO")
- && !value.toUpperCase(Locale.ROOT).startsWith("ISO")) {
- close(); // we're screwed now; we can't trust any subsequent date.
- throw new PSQLException(GT.tr(
- "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation.",
- value), PSQLState.CONNECTION_FAILURE);
+ @Override
+ @SuppressWarnings("deprecation")
+ public byte[] fastpathCall(int fnid, ParameterList parameters,
+ boolean suppressBegin)
+ throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ waitOnLock();
+ if (!suppressBegin) {
+ doSubprotocolBegin();
+ }
+ try {
+ sendFastpathCall(fnid, (SimpleParameterList) parameters);
+ return receiveFastpathResult();
+ } catch (IOException ioe) {
+ abort();
+ throw new PSQLException(GT.tr("An I/O error occurred while sending to the backend."),
+ PSQLState.CONNECTION_FAILURE, ioe);
+ }
+ }
}
- if ("standard_conforming_strings".equals(name)) {
- if ("on".equals(value)) {
- setStandardConformingStrings(true);
- } else if ("off".equals(value)) {
- setStandardConformingStrings(false);
- } else {
- close();
- // we're screwed now; we don't know how to escape string literals
- throw new PSQLException(GT.tr(
- "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off.",
- value), PSQLState.CONNECTION_FAILURE);
- }
- return;
+ public void doSubprotocolBegin() throws SQLException {
+ if (getTransactionState() == TransactionState.IDLE) {
+
+ LOGGER.log(Level.FINEST, "Issuing BEGIN before fastpath or copy call.");
+
+ ResultHandler handler = new ResultHandlerBase() {
+ private boolean sawBegin = false;
+
+ @Override
+ public void handleCommandStatus(String status, long updateCount, long insertOID) {
+ if (!sawBegin) {
+ if (!"BEGIN".equals(status)) {
+ handleError(
+ new PSQLException(GT.tr("Expected command status BEGIN, got {0}.", status),
+ PSQLState.PROTOCOL_VIOLATION));
+ }
+ sawBegin = true;
+ } else {
+ handleError(new PSQLException(GT.tr("Unexpected command status: {0}.", status),
+ PSQLState.PROTOCOL_VIOLATION));
+ }
+ }
+
+ @Override
+ public void handleWarning(SQLWarning warning) {
+ // we don't want to ignore warnings and it would be tricky
+ // to chain them back to the connection, so since we don't
+ // expect to get them in the first place, we just consider
+ // them errors.
+ handleError(warning);
+ }
+ };
+
+ try {
+ /* Send BEGIN with simple protocol preferred */
+ int beginFlags = QueryExecutor.QUERY_NO_METADATA
+ | QueryExecutor.QUERY_ONESHOT
+ | QueryExecutor.QUERY_EXECUTE_AS_SIMPLE;
+ beginFlags = updateQueryMode(beginFlags);
+ sendOneQuery(beginTransactionQuery, SimpleQuery.NO_PARAMETERS, 0, 0, beginFlags);
+ sendSync();
+ processResults(handler, 0);
+ estimatedReceiveBufferBytes = 0;
+ } catch (IOException ioe) {
+ throw new PSQLException(GT.tr("An I/O error occurred while sending to the backend."),
+ PSQLState.CONNECTION_FAILURE, ioe);
+ }
+ }
+
}
- if ("TimeZone".equals(name)) {
- setTimeZone(TimestampUtils.parseBackendTimeZone(value));
- } else if ("application_name".equals(name)) {
- setApplicationName(value);
- } else if ("server_version_num".equals(name)) {
- setServerVersionNum(Integer.parseInt(value));
- } else if ("server_version".equals(name)) {
- setServerVersion(value);
- } else if ("integer_datetimes".equals(name)) {
- if ("on".equals(value)) {
- setIntegerDateTimes(true);
- } else if ("off".equals(value)) {
- setIntegerDateTimes(false);
- } else {
+ @Override
+ @SuppressWarnings("deprecation")
+ public ParameterList createFastpathParameters(int count) {
+ return new SimpleParameterList(count, this);
+ }
+
+ private void sendFastpathCall(int fnid, SimpleParameterList params)
+ throws SQLException, IOException {
+ if (LOGGER.isLoggable(Level.FINEST)) {
+ LOGGER.log(Level.FINEST, " FE=> FunctionCall({0}, {1} params)", new Object[]{fnid, params.getParameterCount()});
+ }
+
+ //
+ // Total size = 4 (length)
+ // + 4 (function OID)
+ // + 2 (format code count) + N * 2 (format codes)
+ // + 2 (parameter count) + encodedSize (parameters)
+ // + 2 (result format)
+
+ int paramCount = params.getParameterCount();
+ int encodedSize = 0;
+ for (int i = 1; i <= paramCount; i++) {
+ if (params.isNull(i)) {
+ encodedSize += 4;
+ } else {
+ encodedSize += 4 + params.getV3Length(i);
+ }
+ }
+
+ pgStream.sendChar('F');
+ pgStream.sendInteger4(4 + 4 + 2 + 2 * paramCount + 2 + encodedSize + 2);
+ pgStream.sendInteger4(fnid);
+ pgStream.sendInteger2(paramCount);
+ for (int i = 1; i <= paramCount; i++) {
+ pgStream.sendInteger2(params.isBinary(i) ? 1 : 0);
+ }
+ pgStream.sendInteger2(paramCount);
+ for (int i = 1; i <= paramCount; i++) {
+ if (params.isNull(i)) {
+ pgStream.sendInteger4(-1);
+ } else {
+ pgStream.sendInteger4(params.getV3Length(i)); // Parameter size
+ params.writeV3Value(i, pgStream);
+ }
+ }
+ pgStream.sendInteger2(1); // Binary result format
+ pgStream.flush();
+ }
+
+ // Just for API compatibility with previous versions.
+ @Override
+ public void processNotifies() throws SQLException {
+ processNotifies(-1);
+ }
+
+ /**
+   * @param timeoutMillis when &gt; 0, block for up to this many milliseconds;
+   *                      when == 0, block forever;
+   *                      when &lt; 0, don't block
+ */
+ @Override
+ public void processNotifies(int timeoutMillis) throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ waitOnLock();
+ // Asynchronous notifies only arrive when we are not in a transaction
+ if (getTransactionState() != TransactionState.IDLE) {
+ return;
+ }
+
+ if (hasNotifications()) {
+ // No need to timeout when there are already notifications. We just check for more in this case.
+ timeoutMillis = -1;
+ }
+
+ boolean useTimeout = timeoutMillis > 0;
+ long startTime = 0L;
+ int oldTimeout = 0;
+ if (useTimeout) {
+ startTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
+ try {
+ oldTimeout = pgStream.getSocket().getSoTimeout();
+ } catch (SocketException e) {
+ throw new PSQLException(GT.tr("An error occurred while trying to get the socket "
+ + "timeout."), PSQLState.CONNECTION_FAILURE, e);
+ }
+ }
+
+ try {
+ while (timeoutMillis >= 0 || pgStream.hasMessagePending()) {
+ if (useTimeout && timeoutMillis >= 0) {
+ setSocketTimeout(timeoutMillis);
+ }
+ int c = pgStream.receiveChar();
+ if (useTimeout && timeoutMillis >= 0) {
+ setSocketTimeout(0); // Don't timeout after first char
+ }
+ switch (c) {
+ case 'A': // Asynchronous Notify
+ receiveAsyncNotify();
+ timeoutMillis = -1;
+ continue;
+ case 'E':
+ // Error Response (response to pretty much everything; backend then skips until Sync)
+ throw receiveErrorResponse();
+ case 'N': // Notice Response (warnings / info)
+ SQLWarning warning = receiveNoticeResponse();
+ addWarning(warning);
+ if (useTimeout) {
+ long newTimeMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
+ timeoutMillis = timeoutMillis + (int) (startTime - newTimeMillis); // Overflows after 49 days, ignore that
+ startTime = newTimeMillis;
+ if (timeoutMillis == 0) {
+ timeoutMillis = -1; // Don't accidentally wait forever
+ }
+ }
+ break;
+ default:
+ throw new PSQLException(GT.tr("Unknown Response Type {0}.", (char) c),
+ PSQLState.CONNECTION_FAILURE);
+ }
+ }
+ } catch (SocketTimeoutException ioe) {
+ // No notifications this time...
+ } catch (IOException ioe) {
+ throw new PSQLException(GT.tr("An I/O error occurred while sending to the backend."),
+ PSQLState.CONNECTION_FAILURE, ioe);
+ } finally {
+ if (useTimeout) {
+ setSocketTimeout(oldTimeout);
+ }
+ }
+ }
+ }
+
+ private void setSocketTimeout(int millis) throws PSQLException {
+ try {
+ Socket s = pgStream.getSocket();
+ if (!s.isClosed()) { // Is this check required?
+ pgStream.setNetworkTimeout(millis);
+ }
+ } catch (IOException e) {
+ throw new PSQLException(GT.tr("An error occurred while trying to reset the socket timeout."),
+ PSQLState.CONNECTION_FAILURE, e);
+ }
+ }
+
+ private byte[] receiveFastpathResult() throws IOException, SQLException {
+ boolean endQuery = false;
+ SQLException error = null;
+ byte[] returnValue = null;
+
+ while (!endQuery) {
+ int c = pgStream.receiveChar();
+ switch (c) {
+ case 'A': // Asynchronous Notify
+ receiveAsyncNotify();
+ break;
+
+ case 'E':
+ // Error Response (response to pretty much everything; backend then skips until Sync)
+ SQLException newError = receiveErrorResponse();
+ if (error == null) {
+ error = newError;
+ } else {
+ error.setNextException(newError);
+ }
+ // keep processing
+ break;
+
+ case 'N': // Notice Response (warnings / info)
+ SQLWarning warning = receiveNoticeResponse();
+ addWarning(warning);
+ break;
+
+ case 'Z': // Ready For Query (eventual response to Sync)
+ receiveRFQ();
+ endQuery = true;
+ break;
+
+ case 'V': // FunctionCallResponse
+ int msgLen = pgStream.receiveInteger4();
+ int valueLen = pgStream.receiveInteger4();
+
+ LOGGER.log(Level.FINEST, " <=BE FunctionCallResponse({0} bytes)", valueLen);
+
+ if (valueLen != -1) {
+ byte[] buf = new byte[valueLen];
+ pgStream.receive(buf, 0, valueLen);
+ returnValue = buf;
+ }
+
+ break;
+
+ case 'S': // Parameter Status
+ try {
+ receiveParameterStatus();
+ } catch (SQLException e) {
+ if (error == null) {
+ error = e;
+ } else {
+ error.setNextException(e);
+ }
+ endQuery = true;
+ }
+ break;
+
+ default:
+ throw new PSQLException(GT.tr("Unknown Response Type {0}.", (char) c),
+ PSQLState.CONNECTION_FAILURE);
+ }
+
+ }
+
+ // did we get an error during this query?
+ if (error != null) {
+ throw error;
+ }
+
+ return returnValue;
+ }
+
+ /**
+ * Sends given query to BE to start, initialize and lock connection for a CopyOperation.
+ *
+ * @param sql COPY FROM STDIN / COPY TO STDOUT statement
+ * @return CopyIn or CopyOut operation object
+ * @throws SQLException on failure
+ */
+ @Override
+ public CopyOperation startCopy(String sql, boolean suppressBegin)
+ throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ waitOnLock();
+ if (!suppressBegin) {
+ doSubprotocolBegin();
+ }
+ byte[] buf = sql.getBytes(StandardCharsets.UTF_8);
+
+ try {
+ LOGGER.log(Level.FINEST, " FE=> Query(CopyStart)");
+
+ pgStream.sendChar('Q');
+ pgStream.sendInteger4(buf.length + 4 + 1);
+ pgStream.send(buf);
+ pgStream.sendChar(0);
+ pgStream.flush();
+
+ return processCopyResults(null, true);
+ // expect a CopyInResponse or CopyOutResponse to our query above
+ } catch (IOException ioe) {
+ throw new PSQLException(GT.tr("Database connection failed when starting copy"),
+ PSQLState.CONNECTION_FAILURE, ioe);
+ }
+ }
+ }
+
+ /**
+   * Locks connection and calls initializer for a new CopyOperation. Called via startCopy ->
+   * processCopyResults.
+ *
+ * @param op an uninitialized CopyOperation
+ * @throws SQLException on locking failure
+ * @throws IOException on database connection failure
+ */
+ private void initCopy(CopyOperationImpl op) throws SQLException, IOException {
+ try (ResourceLock ignore = lock.obtain()) {
+ pgStream.receiveInteger4(); // length not used
+ int rowFormat = pgStream.receiveChar();
+ int numFields = pgStream.receiveInteger2();
+ int[] fieldFormats = new int[numFields];
+
+ for (int i = 0; i < numFields; i++) {
+ fieldFormats[i] = pgStream.receiveInteger2();
+ }
+
+ lock(op);
+ op.init(this, rowFormat, fieldFormats);
+ }
+ }
+
+ //
+ // Garbage collection of parsed statements.
+ //
+ // When a statement is successfully parsed, registerParsedQuery is called.
+ // This creates a PhantomReference referring to the "owner" of the statement
+ // (the originating Query object) and inserts that reference as a key in
+ // parsedQueryMap. The values of parsedQueryMap are the corresponding allocated
+ // statement names. The originating Query object also holds a reference to the
+ // PhantomReference.
+ //
+ // When the owning Query object is closed, it enqueues and clears the associated
+ // PhantomReference.
+ //
+ // If the owning Query object becomes unreachable (see java.lang.ref javadoc) before
+ // being closed, the corresponding PhantomReference is enqueued on
+ // parsedQueryCleanupQueue. In the Sun JVM, phantom references are only enqueued
+ // when a GC occurs, so this is not necessarily prompt but should eventually happen.
+ //
+ // Periodically (currently, just before query execution), the parsedQueryCleanupQueue
+ // is polled. For each enqueued PhantomReference we find, we remove the corresponding
+ // entry from parsedQueryMap, obtaining the name of the underlying statement in the
+ // process. Then we send a message to the backend to deallocate that statement.
+ //
+
  /**
   * Finishes a copy operation and unlocks connection discarding any exchanged data.
   *
   * <p>For COPY FROM STDIN this sends a CopyFail message and drains the expected error
   * response; for COPY TO STDOUT it issues a query cancel instead. In all cases the
   * connection lock held by the operation is released before returning.</p>
   *
   * @param op the copy operation presumably currently holding lock on this connection
   * @throws SQLException on any additional failure
   */
  public void cancelCopy(CopyOperationImpl op) throws SQLException {
    if (!hasLock(op)) {
      throw new PSQLException(GT.tr("Tried to cancel an inactive copy operation"),
          PSQLState.OBJECT_NOT_IN_STATE);
    }

    SQLException error = null;
    int errors = 0;

    try {
      if (op instanceof CopyIn) {
        try (ResourceLock ignore = lock.obtain()) {
          LOGGER.log(Level.FINEST, "FE => CopyFail");
          final byte[] msg = "Copy cancel requested".getBytes(StandardCharsets.US_ASCII);
          // CopyFail message: tag 'f', length (4-byte word + payload + NUL), message, NUL.
          pgStream.sendChar('f'); // CopyFail
          pgStream.sendInteger4(5 + msg.length);
          pgStream.send(msg);
          pgStream.sendChar(0);
          pgStream.flush();
          do {
            try {
              processCopyResults(op, true); // discard rest of input
            } catch (SQLException se) { // expected error response to failing copy
              errors++;
              // Append any previously collected exception to the tail of the new one's
              // chain so the most recent failure is reported first.
              if (error != null) {
                SQLException e = se;
                SQLException next;
                while ((next = e.getNextException()) != null) {
                  e = next;
                }
                e.setNextException(error);
              }
              error = se;
            }
          } while (hasLock(op));
        }
      } else if (op instanceof CopyOut) {
        // COPY TO STDOUT has no CopyFail equivalent; cancel the server-side query instead.
        sendQueryCancel();
      }

    } catch (IOException ioe) {
      throw new PSQLException(GT.tr("Database connection failed when canceling copy operation"),
          PSQLState.CONNECTION_FAILURE, ioe);
    } finally {
      // Need to ensure the lock isn't held anymore, or else
      // future operations, rather than failing due to the
      // broken connection, will simply hang waiting for this
      // lock.
      try (ResourceLock ignore = lock.obtain()) {
        if (hasLock(op)) {
          unlock(op);
        }
      }
    }

    if (op instanceof CopyIn) {
      // A CopyFail must be answered by exactly one ErrorResponse from the backend.
      if (errors < 1) {
        throw new PSQLException(GT.tr("Missing expected error response to copy cancel request"),
            PSQLState.COMMUNICATION_ERROR);
      } else if (errors > 1) {
        throw new PSQLException(
            GT.tr("Got {0} error responses to single copy cancel request", String.valueOf(errors)),
            PSQLState.COMMUNICATION_ERROR, error);
      }
    }
  }
+
+ /**
+ * Finishes writing to copy and unlocks connection.
+ *
+ * @param op the copy operation presumably currently holding lock on this connection
+ * @return number of rows updated for server versions 8.2 or newer
+ * @throws SQLException on failure
+ */
+ public long endCopy(CopyOperationImpl op) throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ if (!hasLock(op)) {
+ throw new PSQLException(GT.tr("Tried to end inactive copy"), PSQLState.OBJECT_NOT_IN_STATE);
+ }
+
+ try {
+ LOGGER.log(Level.FINEST, " FE=> CopyDone");
+
+ pgStream.sendChar('c'); // CopyDone
+ pgStream.sendInteger4(4);
+ pgStream.flush();
+
+ do {
+ processCopyResults(op, true);
+ } while (hasLock(op));
+ return op.getHandledRowCount();
+ } catch (IOException ioe) {
+ throw new PSQLException(GT.tr("Database connection failed when ending copy"),
+ PSQLState.CONNECTION_FAILURE, ioe);
+ }
+ }
+ }
+
+ /**
+ * Sends data during a live COPY IN operation. Only unlocks the connection if server suddenly
+ * returns CommandComplete, which should not happen
+ *
+ * @param op the CopyIn operation presumably currently holding lock on this connection
+ * @param data bytes to send
+ * @param off index of first byte to send (usually 0)
+ * @param siz number of bytes to send (usually data.length)
+ * @throws SQLException on failure
+ */
+ public void writeToCopy(CopyOperationImpl op, byte[] data, int off, int siz)
+ throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ if (!hasLock(op)) {
+ throw new PSQLException(GT.tr("Tried to write to an inactive copy operation"),
+ PSQLState.OBJECT_NOT_IN_STATE);
+ }
+
+ LOGGER.log(Level.FINEST, " FE=> CopyData({0})", siz);
+
+ try {
+ pgStream.sendChar('d');
+ pgStream.sendInteger4(siz + 4);
+ pgStream.send(data, off, siz);
+ } catch (IOException ioe) {
+ throw new PSQLException(GT.tr("Database connection failed when writing to copy"),
+ PSQLState.CONNECTION_FAILURE, ioe);
+ }
+ }
+ }
+
+ /**
+ * Sends data during a live COPY IN operation. Only unlocks the connection if server suddenly
+ * returns CommandComplete, which should not happen
+ *
+ * @param op the CopyIn operation presumably currently holding lock on this connection
+ * @param from the source of bytes, e.g. a ByteBufferByteStreamWriter
+ * @throws SQLException on failure
+ */
+ public void writeToCopy(CopyOperationImpl op, ByteStreamWriter from)
+ throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ if (!hasLock(op)) {
+ throw new PSQLException(GT.tr("Tried to write to an inactive copy operation"),
+ PSQLState.OBJECT_NOT_IN_STATE);
+ }
+
+ int siz = from.getLength();
+ LOGGER.log(Level.FINEST, " FE=> CopyData({0})", siz);
+
+ try {
+ pgStream.sendChar('d');
+ pgStream.sendInteger4(siz + 4);
+ pgStream.send(from);
+ } catch (IOException ioe) {
+ throw new PSQLException(GT.tr("Database connection failed when writing to copy"),
+ PSQLState.CONNECTION_FAILURE, ioe);
+ }
+ }
+ }
+
+ //
+ // Essentially the same strategy is used for the cleanup of portals.
+ // Note that each Portal holds a reference to the corresponding Query
+ // that generated it, so the Query won't be collected (and the statement
+ // closed) until all the Portals are, too. This is required by the mechanics
+ // of the backend protocol: when a statement is closed, all dependent portals
+ // are also closed.
+ //
+
+ public void flushCopy(CopyOperationImpl op) throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ if (!hasLock(op)) {
+ throw new PSQLException(GT.tr("Tried to write to an inactive copy operation"),
+ PSQLState.OBJECT_NOT_IN_STATE);
+ }
+
+ try {
+ pgStream.flush();
+ } catch (IOException ioe) {
+ throw new PSQLException(GT.tr("Database connection failed when writing to copy"),
+ PSQLState.CONNECTION_FAILURE, ioe);
+ }
+ }
+ }
+
+ /**
+ * Wait for a row of data to be received from server on an active copy operation
+ * Connection gets unlocked by processCopyResults() at end of operation.
+ *
+ * @param op the copy operation presumably currently holding lock on this connection
+ * @param block whether to block waiting for input
+ * @throws SQLException on any failure
+ */
+ void readFromCopy(CopyOperationImpl op, boolean block) throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ if (!hasLock(op)) {
+ throw new PSQLException(GT.tr("Tried to read from inactive copy"),
+ PSQLState.OBJECT_NOT_IN_STATE);
+ }
+
+ try {
+ processCopyResults(op, block); // expect a call to handleCopydata() to store the data
+ } catch (IOException ioe) {
+ throw new PSQLException(GT.tr("Database connection failed when reading from copy"),
+ PSQLState.CONNECTION_FAILURE, ioe);
+ }
+ }
+ }
+
  /**
   * Handles copy sub protocol responses from server. Unlocks at end of sub protocol, so operations
   * on pgStream or QueryExecutor are not allowed in a method after calling this!
   *
   * @param op the currently active copy operation, or null when a copy is expected to start
   * @param block whether to block waiting for input
   * @return CopyIn when COPY FROM STDIN starts; CopyOut when COPY TO STDOUT starts; null when copy
   *         ends; otherwise, the operation given as parameter.
   * @throws SQLException in case of misuse
   * @throws IOException from the underlying connection
   */
  CopyOperationImpl processCopyResults(CopyOperationImpl op, boolean block)
      throws SQLException, IOException {

    /*
     * fixes issue #1592 where one thread closes the stream and another is reading it
     */
    if (pgStream.isClosed()) {
      throw new PSQLException(GT.tr("PGStream is closed"),
          PSQLState.CONNECTION_DOES_NOT_EXIST);
    }
    /*
     * This is a hack as we should not end up here, but sometimes do with large copy operations.
     * The CAS guards against re-entrant processing from another thread.
     */
    if (!processingCopyResults.compareAndSet(false, true)) {
      LOGGER.log(Level.INFO, "Ignoring request to process copy results, already processing");
      return null;
    }

    // put this all in a try, finally block and reset the processingCopyResults in the finally clause
    try {
      boolean endReceiving = false;
      SQLException error = null;
      SQLException errors = null;
      int len;

      while (!endReceiving && (block || pgStream.hasMessagePending())) {

        // There is a bug in the server's implementation of the copy
        // protocol. It returns command complete immediately upon
        // receiving the EOF marker in the binary protocol,
        // potentially before we've issued CopyDone. When we are not
        // blocking, we don't think we are done, so we hold off on
        // processing command complete and any subsequent messages
        // until we actually are done with the copy.
        //
        if (!block) {
          int c = pgStream.peekChar();
          if (c == 'C') {
            // CommandComplete
            LOGGER.log(Level.FINEST, " <=BE CommandStatus, Ignored until CopyDone");
            break;
          }
        }

        int c = pgStream.receiveChar();
        switch (c) {

          case 'A': // Asynchronous Notify

            LOGGER.log(Level.FINEST, " <=BE Asynchronous Notification while copying");

            receiveAsyncNotify();
            break;

          case 'N': // Notice Response

            LOGGER.log(Level.FINEST, " <=BE Notification while copying");

            addWarning(receiveNoticeResponse());
            break;

          case 'C': // Command Complete

            String status = receiveCommandStatus();

            try {
              if (op == null) {
                throw new PSQLException(GT
                    .tr("Received CommandComplete ''{0}'' without an active copy operation", status),
                    PSQLState.OBJECT_NOT_IN_STATE);
              }
              op.handleCommandStatus(status);
            } catch (SQLException se) {
              error = se;
            }

            // ReadyForQuery must follow; block to make sure we see it.
            block = true;
            break;

          case 'E': // ErrorMessage (expected response to CopyFail)

            error = receiveErrorResponse();
            // We've received the error and we now expect to receive
            // Ready for query, but we must block because it might still be
            // on the wire and not here yet.
            block = true;
            break;

          case 'G': // CopyInResponse

            LOGGER.log(Level.FINEST, " <=BE CopyInResponse");

            if (op != null) {
              error = new PSQLException(GT.tr("Got CopyInResponse from server during an active {0}",
                  op.getClass().getName()), PSQLState.OBJECT_NOT_IN_STATE);
            }

            op = new CopyInImpl();
            initCopy(op);
            endReceiving = true;
            break;

          case 'H': // CopyOutResponse

            LOGGER.log(Level.FINEST, " <=BE CopyOutResponse");

            if (op != null) {
              error = new PSQLException(GT.tr("Got CopyOutResponse from server during an active {0}",
                  op.getClass().getName()), PSQLState.OBJECT_NOT_IN_STATE);
            }

            op = new CopyOutImpl();
            initCopy(op);
            endReceiving = true;
            break;

          case 'W': // CopyBothResponse

            LOGGER.log(Level.FINEST, " <=BE CopyBothResponse");

            if (op != null) {
              error = new PSQLException(GT.tr("Got CopyBothResponse from server during an active {0}",
                  op.getClass().getName()), PSQLState.OBJECT_NOT_IN_STATE);
            }

            op = new CopyDualImpl();
            initCopy(op);
            endReceiving = true;
            break;

          case 'd': // CopyData

            LOGGER.log(Level.FINEST, " <=BE CopyData");

            // Payload length excludes the 4-byte length word itself.
            len = pgStream.receiveInteger4() - 4;

            assert len > 0 : "Copy Data length must be greater than 4";

            byte[] buf = pgStream.receive(len);
            if (op == null) {
              error = new PSQLException(GT.tr("Got CopyData without an active copy operation"),
                  PSQLState.OBJECT_NOT_IN_STATE);
            } else if (!(op instanceof CopyOut)) {
              error = new PSQLException(
                  GT.tr("Unexpected copydata from server for {0}", op.getClass().getName()),
                  PSQLState.COMMUNICATION_ERROR);
            } else {
              op.handleCopydata(buf);
            }
            endReceiving = true;
            break;

          case 'c': // CopyDone (expected after all copydata received)

            LOGGER.log(Level.FINEST, " <=BE CopyDone");

            len = pgStream.receiveInteger4() - 4;
            if (len > 0) {
              pgStream.receive(len); // not in specification; should never appear
            }

            if (!(op instanceof CopyOut)) {
              error = new PSQLException("Got CopyDone while not copying from server",
                  PSQLState.OBJECT_NOT_IN_STATE);
            }

            // keep receiving since we expect a CommandComplete
            block = true;
            break;
          case 'S': // Parameter Status
            try {
              receiveParameterStatus();
            } catch (SQLException e) {
              error = e;
              endReceiving = true;
            }
            break;

          case 'Z': // ReadyForQuery: After FE:CopyDone => BE:CommandComplete

            receiveRFQ();
            // End of the copy sub protocol: release the connection lock held by the operation.
            if (op != null && hasLock(op)) {
              unlock(op);
            }
            op = null;
            endReceiving = true;
            break;

          // If the user sends a non-copy query, we've got to handle some additional things.
          //
          case 'T': // Row Description (response to Describe)
            LOGGER.log(Level.FINEST, " <=BE RowDescription (during copy ignored)");

            skipMessage();
            break;

          case 'D': // DataRow
            LOGGER.log(Level.FINEST, " <=BE DataRow (during copy ignored)");

            skipMessage();
            break;

          default:
            throw new IOException(
                GT.tr("Unexpected packet type during copy: {0}", Integer.toString(c)));
        }

        // Collect errors into a neat chain for completeness
        if (error != null) {
          if (errors != null) {
            error.setNextException(errors);
          }
          errors = error;
          error = null;
        }
      }

      if (errors != null) {
        throw errors;
      }
      return op;

    } finally {
      /*
      reset here in the finally block to make sure it really is cleared
      */
      processingCopyResults.set(false);
    }
  }
+
+ /*
+ * To prevent client/server protocol deadlocks, we try to manage the estimated recv buffer size
+ * and force a sync +flush and process results if we think it might be getting too full.
+ *
+ * See the comments above MAX_BUFFERED_RECV_BYTES's declaration for details.
+ */
+ private void flushIfDeadlockRisk(Query query, boolean disallowBatching,
+ ResultHandler resultHandler,
+ BatchResultHandler batchHandler,
+ final int flags) throws IOException {
+ // Assume all statements need at least this much reply buffer space,
+ // plus params
+ estimatedReceiveBufferBytes += NODATA_QUERY_RESPONSE_SIZE_BYTES;
+
+ SimpleQuery sq = (SimpleQuery) query;
+ if (sq.isStatementDescribed()) {
+ /*
+ * Estimate the response size of the fields and add it to the expected response size.
+ *
+ * It's impossible for us to estimate the rowcount. We'll assume one row, as that's the common
+ * case for batches and we're leaving plenty of breathing room in this approach. It's still
+ * not deadlock-proof though; see pgjdbc github issues #194 and #195.
+ */
+ int maxResultRowSize = sq.getMaxResultRowSize();
+ if (maxResultRowSize >= 0) {
+ estimatedReceiveBufferBytes += maxResultRowSize;
+ } else {
+ LOGGER.log(Level.FINEST, "Couldn't estimate result size or result size unbounded, "
+ + "disabling batching for this query.");
+ disallowBatching = true;
+ }
+ } else {
+ /*
+ * We only describe a statement if we're expecting results from it, so it's legal to batch
+ * unprepared statements. We'll abort later if we get any uresults from them where none are
+ * expected. For now all we can do is hope the user told us the truth and assume that
+ * NODATA_QUERY_RESPONSE_SIZE_BYTES is enough to cover it.
+ */
+ }
+
+ if (disallowBatching || estimatedReceiveBufferBytes >= MAX_BUFFERED_RECV_BYTES) {
+ LOGGER.log(Level.FINEST, "Forcing Sync, receive buffer full or batching disallowed");
+ sendSync();
+ processResults(resultHandler, flags);
+ estimatedReceiveBufferBytes = 0;
+ if (batchHandler != null) {
+ batchHandler.secureProgress();
+ }
+ }
+
+ }
+
  /**
   * Sends a query (and each of its subqueries, for compound statements) to the backend,
   * forcing a Sync and result processing first whenever the estimated receive buffer is
   * at risk of overflowing (see flushIfDeadlockRisk). Stops sending as soon as the
   * result handler has recorded an exception.
   *
   * @param query the query (possibly compound) to send
   * @param parameters parameter values for the query and any subqueries
   * @param maxRows maximum rows to return, 0 for no limit
   * @param fetchSize cursor fetch size, 0 to disable cursor fetching
   * @param flags QueryExecutor.QUERY_* flags controlling execution
   * @param resultHandler consulted for earlier errors before sending more queries
   * @param batchHandler batch progress callback, may be null
   * @param adaptiveFetch whether adaptive fetch sizing is enabled for this query
   * @throws IOException on connection failure
   * @throws SQLException on protocol misuse
   */
  private void sendQuery(Query query, V3ParameterList parameters, int maxRows, int fetchSize,
      int flags, ResultHandler resultHandler,
      BatchResultHandler batchHandler, boolean adaptiveFetch) throws IOException, SQLException {
    // Now the query itself.
    Query[] subqueries = query.getSubqueries();
    SimpleParameterList[] subparams = parameters.getSubparams();

    // We know this is deprecated, but still respect it in case anyone's using it.
    // PgJDBC itself no longer does.
    @SuppressWarnings("deprecation")
    boolean disallowBatching = (flags & QueryExecutor.QUERY_DISALLOW_BATCHING) != 0;

    if (subqueries == null) {
      flushIfDeadlockRisk(query, disallowBatching, resultHandler, batchHandler, flags);

      // If we saw errors, don't send anything more.
      if (resultHandler.getException() == null) {
        if (fetchSize != 0) {
          adaptiveFetchCache.addNewQuery(adaptiveFetch, query);
        }
        sendOneQuery((SimpleQuery) query, (SimpleParameterList) parameters, maxRows, fetchSize,
            flags);
      }
    } else {
      for (int i = 0; i < subqueries.length; i++) {
        final Query subquery = subqueries[i];
        flushIfDeadlockRisk(subquery, disallowBatching, resultHandler, batchHandler, flags);

        // If we saw errors, don't send anything more.
        if (resultHandler.getException() != null) {
          break;
        }

        // In the situation where parameters is already
        // NO_PARAMETERS it cannot know the correct
        // number of array elements to return in the
        // above call to getSubparams(), so it must
        // return null which we check for here.
        //
        SimpleParameterList subparam = SimpleQuery.NO_PARAMETERS;
        if (subparams != null) {
          subparam = subparams[i];
        }
        if (fetchSize != 0) {
          adaptiveFetchCache.addNewQuery(adaptiveFetch, subquery);
        }
        sendOneQuery((SimpleQuery) subquery, subparam, maxRows, fetchSize, flags);
      }
    }
  }
+
+ private void sendSync() throws IOException {
+ LOGGER.log(Level.FINEST, " FE=> Sync");
+
+ pgStream.sendChar('S'); // Sync
+ pgStream.sendInteger4(4); // Length
+ pgStream.flush();
+ // Below "add queues" are likely not required at all
+ pendingExecuteQueue.add(new ExecuteRequest(sync, null, true));
+ pendingDescribePortalQueue.add(sync);
+ }
+
  /**
   * Sends a Parse message for the query unless it is already prepared with matching
   * parameter types. For non-oneshot queries a fresh server-side statement name is
   * allocated and registered for garbage-collected cleanup; oneshot queries use the
   * unnamed statement.
   *
   * @param query the query to parse
   * @param params parameter list supplying the parameter type OIDs
   * @param oneShot true to use the unnamed statement instead of allocating a name
   * @throws IOException on connection failure
   */
  private void sendParse(SimpleQuery query, SimpleParameterList params, boolean oneShot)
      throws IOException {
    // Already parsed, or we have a Parse pending and the types are right?
    int[] typeOIDs = params.getTypeOIDs();
    if (query.isPreparedFor(typeOIDs, deallocateEpoch)) {
      return;
    }

    // Clean up any existing statement, as we can't use it.
    query.unprepare();
    processDeadParsedQueries();

    // Remove any cached Field values. The re-parsed query might report different
    // fields because input parameter types may result in different type inferences
    // for unspecified types.
    query.setFields(null);

    String statementName = null;
    if (!oneShot) {
      // Generate a statement name to use.
      statementName = "S_" + (nextUniqueID++);

      // And prepare the new statement.
      // NB: Must clone the OID array, as it's a direct reference to
      // the SimpleParameterList's internal array that might be modified
      // under us.
      query.setStatementName(statementName, deallocateEpoch);
      query.setPrepareTypes(typeOIDs);
      registerParsedQuery(query, statementName);
    }

    byte[] encodedStatementName = query.getEncodedStatementName();
    String nativeSql = query.getNativeSql();

    if (LOGGER.isLoggable(Level.FINEST)) {
      StringBuilder sbuf = new StringBuilder(" FE=> Parse(stmt=" + statementName + ",query=\"");
      sbuf.append(nativeSql);
      sbuf.append("\",oids={");
      for (int i = 1; i <= params.getParameterCount(); i++) {
        if (i != 1) {
          sbuf.append(",");
        }
        sbuf.append(params.getTypeOID(i));
      }
      sbuf.append("})");
      LOGGER.log(Level.FINEST, sbuf.toString());
    }

    //
    // Send Parse.
    //

    byte[] queryUtf8 = nativeSql.getBytes(StandardCharsets.UTF_8);

    // Total size = 4 (size field)
    // + N + 1 (statement name, zero-terminated)
    // + N + 1 (query, zero terminated)
    // + 2 (parameter count) + N * 4 (parameter types)
    int encodedSize = 4
        + (encodedStatementName == null ? 0 : encodedStatementName.length) + 1
        + queryUtf8.length + 1
        + 2 + 4 * params.getParameterCount();

    pgStream.sendChar('P'); // Parse
    pgStream.sendInteger4(encodedSize);
    if (encodedStatementName != null) {
      pgStream.send(encodedStatementName);
    }
    pgStream.sendChar(0); // End of statement name
    pgStream.send(queryUtf8); // Query string
    pgStream.sendChar(0); // End of query string.
    pgStream.sendInteger2(params.getParameterCount()); // # of parameter types specified
    for (int i = 1; i <= params.getParameterCount(); i++) {
      pgStream.sendInteger4(params.getTypeOID(i));
    }

    pendingParseQueue.add(query);
  }
+
  /**
   * Sends a Bind message creating a portal (named, or the unnamed portal when {@code portal}
   * is null) from the query's prepared statement and the supplied parameter values, and
   * decides per-field text/binary result formats. If a stream-backed parameter fails while
   * being written, padding is still sent to honor the declared length and a PGBindException
   * is thrown afterwards.
   *
   * @param query the parsed query to bind
   * @param params parameter values to bind
   * @param portal destination portal, or null for the unnamed portal
   * @param noBinaryTransfer true to force text format for all result columns
   * @throws IOException on connection failure, or PGBindException on oversized/failed binds
   */
  private void sendBind(SimpleQuery query, SimpleParameterList params, Portal portal,
      boolean noBinaryTransfer) throws IOException {
    //
    // Send Bind.
    //

    String statementName = query.getStatementName();
    byte[] encodedStatementName = query.getEncodedStatementName();
    byte[] encodedPortalName = portal == null ? null : portal.getEncodedPortalName();

    if (LOGGER.isLoggable(Level.FINEST)) {
      StringBuilder sbuf = new StringBuilder(" FE=> Bind(stmt=" + statementName + ",portal=" + portal);
      for (int i = 1; i <= params.getParameterCount(); i++) {
        sbuf.append(",$").append(i).append("=<")
            .append(params.toString(i, true))
            .append(">,type=").append(Oid.toString(params.getTypeOID(i)));
      }
      sbuf.append(")");
      LOGGER.log(Level.FINEST, sbuf.toString());
    }

    // Total size = 4 (size field) + N + 1 (destination portal)
    // + N + 1 (statement name)
    // + 2 (param format code count) + N * 2 (format codes)
    // + 2 (param value count) + N (encoded param value size)
    // + 2 (result format code count, 0)
    // First pass: just the total encoded size of the parameter values.
    long encodedSize = 0;
    for (int i = 1; i <= params.getParameterCount(); i++) {
      if (params.isNull(i)) {
        encodedSize += 4; // NULL is sent as a bare length of -1
      } else {
        encodedSize += (long) 4 + params.getV3Length(i);
      }
    }

    Field[] fields = query.getFields();
    if (!noBinaryTransfer && query.needUpdateFieldFormats() && fields != null) {
      for (Field field : fields) {
        if (useBinary(field)) {
          field.setFormat(Field.BINARY_FORMAT);
          query.setHasBinaryFields(true);
        }
      }
    }
    // If text-only results are required (e.g. updateable resultset), and the query has binary columns,
    // flip to text format.
    if (noBinaryTransfer && query.hasBinaryFields() && fields != null) {
      for (Field field : fields) {
        if (field.getFormat() != Field.TEXT_FORMAT) {
          field.setFormat(Field.TEXT_FORMAT);
        }
      }
      query.resetNeedUpdateFieldFormats();
      query.setHasBinaryFields(false);
    }

    // This is not the number of binary fields, but the total number
    // of fields if any of them are binary or zero if all of them
    // are text.
    int numBinaryFields = !noBinaryTransfer && query.hasBinaryFields() && fields != null
        ? fields.length : 0;

    // Second pass: fold the parameter-value size into the full message size.
    encodedSize = 4
        + (encodedPortalName == null ? 0 : encodedPortalName.length) + 1
        + (encodedStatementName == null ? 0 : encodedStatementName.length) + 1
        + 2 + params.getParameterCount() * 2
        + 2 + encodedSize
        + 2 + numBinaryFields * 2;

    // backend's MaxAllocSize is the largest message that can
    // be received from a client. If we have a bigger value
    // from either very large parameters or incorrect length
    // descriptions of setXXXStream we do not send the bind
    // message.
    //
    if (encodedSize > 0x3fffffff) {
      throw new PGBindException(new IOException(GT.tr(
          "Bind message length {0} too long. This can be caused by very large or incorrect length specifications on InputStream parameters.",
          encodedSize)));
    }

    pgStream.sendChar('B'); // Bind
    pgStream.sendInteger4((int) encodedSize); // Message size
    if (encodedPortalName != null) {
      pgStream.send(encodedPortalName); // Destination portal name.
    }
    pgStream.sendChar(0); // End of portal name.
    if (encodedStatementName != null) {
      pgStream.send(encodedStatementName); // Source statement name.
    }
    pgStream.sendChar(0); // End of statement name.

    pgStream.sendInteger2(params.getParameterCount()); // # of parameter format codes
    for (int i = 1; i <= params.getParameterCount(); i++) {
      pgStream.sendInteger2(params.isBinary(i) ? 1 : 0); // Parameter format code
    }

    pgStream.sendInteger2(params.getParameterCount()); // # of parameter values

    // If an error occurs when reading a stream we have to
    // continue pumping out data to match the length we
    // said we would. Once we've done that we throw
    // this exception. Multiple exceptions can occur and
    // it really doesn't matter which one is reported back
    // to the caller.
    //
    PGBindException bindException = null;

    for (int i = 1; i <= params.getParameterCount(); i++) {
      if (params.isNull(i)) {
        pgStream.sendInteger4(-1); // Magic size of -1 means NULL
      } else {
        pgStream.sendInteger4(params.getV3Length(i)); // Parameter size
        try {
          params.writeV3Value(i, pgStream); // Parameter value
        } catch (PGBindException be) {
          bindException = be;
        }
      }
    }

    pgStream.sendInteger2(numBinaryFields); // # of result format codes
    for (int i = 0; fields != null && i < numBinaryFields; i++) {
      pgStream.sendInteger2(fields[i].getFormat());
    }

    pendingBindQueue.add(portal == null ? UNNAMED_PORTAL : portal);

    if (bindException != null) {
      throw bindException;
    }
  }
+
  /**
   * Returns true if the specified field should be retrieved using binary encoding.
   *
   * @param field The field whose Oid type to analyse.
   * @return True if {@link Field#BINARY_FORMAT} should be used, false if
   *         {@link Field#TEXT_FORMAT} should be used.
   */
  private boolean useBinary(Field field) {
    int oid = field.getOID();
    return useBinaryForReceive(oid);
  }
+
  /**
   * Sends a Describe (Portal) message so the backend returns a RowDescription for the
   * portal's result columns, and marks the query as portal-described.
   *
   * @param query the query whose portal is described
   * @param portal the portal to describe, or null for the unnamed portal
   * @throws IOException on connection failure
   */
  private void sendDescribePortal(SimpleQuery query, Portal portal) throws IOException {
    //
    // Send Describe.
    //

    LOGGER.log(Level.FINEST, " FE=> Describe(portal={0})", portal);

    byte[] encodedPortalName = portal == null ? null : portal.getEncodedPortalName();

    // Total size = 4 (size field) + 1 (describe type, 'P') + N + 1 (portal name)
    int encodedSize = 4 + 1 + (encodedPortalName == null ? 0 : encodedPortalName.length) + 1;

    pgStream.sendChar('D'); // Describe
    pgStream.sendInteger4(encodedSize); // message size
    pgStream.sendChar('P'); // Describe (Portal)
    if (encodedPortalName != null) {
      pgStream.send(encodedPortalName); // portal name to describe
    }
    pgStream.sendChar(0); // end of portal name

    pendingDescribePortalQueue.add(query);
    query.setPortalDescribed(true);
  }
+
  /**
   * Sends a Describe (Statement) message so the backend returns ParameterDescription and
   * RowDescription for the prepared statement, and records the request on the pending
   * queues. Marks the query as both statement- and portal-described.
   *
   * @param query the query whose statement is described
   * @param params parameter list to be resolved from the ParameterDescription reply
   * @param describeOnly true when only the description is wanted, without execution
   * @throws IOException on connection failure
   */
  private void sendDescribeStatement(SimpleQuery query, SimpleParameterList params,
      boolean describeOnly) throws IOException {
    // Send Statement Describe

    LOGGER.log(Level.FINEST, " FE=> Describe(statement={0})", query.getStatementName());

    byte[] encodedStatementName = query.getEncodedStatementName();

    // Total size = 4 (size field) + 1 (describe type, 'S') + N + 1 (statement name)
    int encodedSize = 4 + 1 + (encodedStatementName == null ? 0 : encodedStatementName.length) + 1;

    pgStream.sendChar('D'); // Describe
    pgStream.sendInteger4(encodedSize); // Message size
    pgStream.sendChar('S'); // Describe (Statement);
    if (encodedStatementName != null) {
      pgStream.send(encodedStatementName); // Statement name
    }
    pgStream.sendChar(0); // end message

    // Note: statement name can change over time for the same query object
    // Thus we take a snapshot of the query name
    pendingDescribeStatementQueue.add(
        new DescribeRequest(query, params, describeOnly, query.getStatementName()));
    pendingDescribePortalQueue.add(query);
    query.setStatementDescribed(true);
    query.setPortalDescribed(true);
  }
+
  /**
   * Sends an Execute message for the given portal with a row limit, and records the
   * request on the pending execute queue.
   *
   * @param query the query being executed
   * @param portal the portal to execute, or null for the unnamed portal
   * @param limit maximum number of rows to return; 0 means no limit
   * @throws IOException on connection failure
   */
  private void sendExecute(SimpleQuery query, Portal portal, int limit)
      throws IOException {
    //
    // Send Execute.
    //
    if (LOGGER.isLoggable(Level.FINEST)) {
      LOGGER.log(Level.FINEST, " FE=> Execute(portal={0},limit={1})", new Object[]{portal, limit});
    }

    byte[] encodedPortalName = portal == null ? null : portal.getEncodedPortalName();
    int encodedSize = encodedPortalName == null ? 0 : encodedPortalName.length;

    // Total size = 4 (size field) + 1 + N (source portal) + 4 (max rows)
    pgStream.sendChar('E'); // Execute
    pgStream.sendInteger4(4 + 1 + encodedSize + 4); // message size
    if (encodedPortalName != null) {
      pgStream.send(encodedPortalName); // portal name
    }
    pgStream.sendChar(0); // portal name terminator
    pgStream.sendInteger4(limit); // row limit

    pendingExecuteQueue.add(new ExecuteRequest(query, portal, false));
  }
+
  /**
   * Sends a Close (Portal) message for the given portal name.
   *
   * @param portalName name of the portal to close, or null for the unnamed portal
   * @throws IOException on connection failure
   */
  private void sendClosePortal(String portalName) throws IOException {
    //
    // Send Close.
    //

    LOGGER.log(Level.FINEST, " FE=> ClosePortal({0})", portalName);

    byte[] encodedPortalName = portalName == null ? null : portalName.getBytes(StandardCharsets.UTF_8);
    int encodedSize = encodedPortalName == null ? 0 : encodedPortalName.length;

    // Total size = 4 (size field) + 1 (close type, 'P') + 1 + N (portal name)
    pgStream.sendChar('C'); // Close
    pgStream.sendInteger4(4 + 1 + 1 + encodedSize); // message size
    pgStream.sendChar('P'); // Close (Portal)
    if (encodedPortalName != null) {
      pgStream.send(encodedPortalName);
    }
    pgStream.sendChar(0); // portal name terminator (omitting the name closes the unnamed portal)
  }
+
+ private void sendCloseStatement(String statementName) throws IOException {
+ //
+ // Send Close.
+ //
+
+ LOGGER.log(Level.FINEST, " FE=> CloseStatement({0})", statementName);
+
+ byte[] encodedStatementName = statementName.getBytes(StandardCharsets.UTF_8);
+
+ // Total size = 4 (size field) + 1 (close type, 'S') + N + 1 (statement name)
+ pgStream.sendChar('C'); // Close
+ pgStream.sendInteger4(4 + 1 + encodedStatementName.length + 1); // message size
+ pgStream.sendChar('S'); // Close (Statement)
+ pgStream.send(encodedStatementName); // statement to close
+ pgStream.sendChar(0); // statement name terminator
+ }
+
+ // sendOneQuery sends a single statement via the extended query protocol.
+ // Per the FE/BE docs this is essentially the same as how a simple query runs
+ // (except that it generates some extra acknowledgement messages, and we
+ // can send several queries before doing the Sync)
+ //
+ // Parse S_n from "query string with parameter placeholders"; skipped if already done previously
+ // or if oneshot
+ // Bind C_n from S_n plus parameters (or from unnamed statement for oneshot queries)
+ // Describe C_n; skipped if caller doesn't want metadata
+ // Execute C_n with maxRows limit; maxRows = 1 if caller doesn't want results
+ // (above repeats once per call to sendOneQuery)
+ // Sync (sent by caller)
+ //
+ // Sends the extended-protocol message sequence for one query; in simple mode
+ // delegates to sendSimpleQuery ('Q') instead. Does not flush or Sync itself.
+ private void sendOneQuery(SimpleQuery query, SimpleParameterList params, int maxRows,
+ int fetchSize, int flags) throws IOException {
+ boolean asSimple = (flags & QueryExecutor.QUERY_EXECUTE_AS_SIMPLE) != 0;
+ if (asSimple) {
+ assert (flags & QueryExecutor.QUERY_DESCRIBE_ONLY) == 0
+ : "Simple mode does not support describe requests. sql = " + query.getNativeSql()
+ + ", flags = " + flags;
+ sendSimpleQuery(query, params);
+ return;
+ }
+
+ assert !query.getNativeQuery().multiStatement
+ : "Queries that might contain ; must be executed with QueryExecutor.QUERY_EXECUTE_AS_SIMPLE mode. "
+ + "Given query is " + query.getNativeSql();
+
+ // Per https://www.postgresql.org/docs/current/static/protocol-flow.html#PROTOCOL-FLOW-EXT-QUERY
+ // A Bind message can use the unnamed prepared statement to create a named portal.
+ // If the Bind is successful, an Execute message can reference that named portal until either
+ // the end of the current transaction
+ // or the named portal is explicitly destroyed
+
+ boolean noResults = (flags & QueryExecutor.QUERY_NO_RESULTS) != 0;
+ boolean noMeta = (flags & QueryExecutor.QUERY_NO_METADATA) != 0;
+ boolean describeOnly = (flags & QueryExecutor.QUERY_DESCRIBE_ONLY) != 0;
+ // extended queries always use a portal
+ // the usePortal flag controls whether or not we use a *named* portal
+ boolean usePortal = (flags & QueryExecutor.QUERY_FORWARD_CURSOR) != 0 && !noResults && !noMeta
+ && fetchSize > 0 && !describeOnly;
+ boolean oneShot = (flags & QueryExecutor.QUERY_ONESHOT) != 0;
+ boolean noBinaryTransfer = (flags & QUERY_NO_BINARY_TRANSFER) != 0;
+ boolean forceDescribePortal = (flags & QUERY_FORCE_DESCRIBE_PORTAL) != 0;
+
+ // Work out how many rows to fetch in this pass.
+
+ int rows;
+ if (noResults) {
+ rows = 1; // We're discarding any results anyway, so limit data transfer to a minimum
+ } else if (!usePortal) {
+ rows = maxRows; // Not using a portal -- fetchSize is irrelevant
+ } else if (maxRows != 0 && fetchSize > maxRows) {
+ // fetchSize > maxRows, use maxRows (nb: fetchSize cannot be 0 if usePortal == true)
+ rows = maxRows;
+ } else {
+ rows = fetchSize; // fetchSize <= maxRows, or maxRows == 0 (unlimited)
+ }
+
+ sendParse(query, params, oneShot);
+
+ // Must do this after sendParse to pick up any changes to the
+ // query's state.
+ //
+ boolean queryHasUnknown = query.hasUnresolvedTypes();
+ boolean paramsHasUnknown = params.hasUnresolvedTypes();
+
+ // Describe the statement when explicitly requested, or when both the query
+ // and the parameters have unresolved types and no description is cached yet.
+ boolean describeStatement = describeOnly
+ || (!oneShot && paramsHasUnknown && queryHasUnknown && !query.isStatementDescribed());
+
+ if (!describeStatement && paramsHasUnknown && !queryHasUnknown) {
+ int[] queryOIDs = query.getPrepareTypes();
+ int[] paramOIDs = params.getTypeOIDs();
+ for (int i = 0; i < paramOIDs.length; i++) {
+ // Only supply type information when there isn't any
+ // already, don't arbitrarily overwrite user supplied
+ // type information.
+ if (paramOIDs[i] == Oid.UNSPECIFIED) {
+ params.setResolvedType(i + 1, queryOIDs[i]);
+ }
+ }
+ }
+
+ if (describeStatement) {
+ sendDescribeStatement(query, params, describeOnly);
+ if (describeOnly) {
+ return;
+ }
+ }
+
+ // Construct a new portal if needed.
+ Portal portal = null;
+ if (usePortal) {
+ String portalName = "C_" + (nextUniqueID++);
+ portal = new Portal(query, portalName);
+ }
+
+ sendBind(query, params, portal, noBinaryTransfer);
+
+ // A statement describe will also output a RowDescription,
+ // so don't reissue it here if we've already done so.
+ //
+ if (!noMeta && !describeStatement) {
+ /*
+ * don't send describe if we already have cached the row description from previous executions
+ *
+ * XXX Clearing the fields / unpreparing the query (in sendParse) is incorrect, see bug #267.
+ * We might clear the cached fields in a later execution of this query if the bind parameter
+ * types change, but we're assuming here that they'll still be valid when we come to process
+ * the results of this query, so we don't send a new describe here. We re-describe after the
+ * fields are cleared, but the result of that gets processed after processing the results from
+ * earlier executions that we didn't describe because we didn't think we had to.
+ *
+ * To work around this, force a Describe at each execution in batches where this can be a
+ * problem. It won't cause more round trips so the performance impact is low, and it'll ensure
+ * that the field information available when we decoded the results. This is undeniably a
+ * hack, but there aren't many good alternatives.
+ */
+ if (!query.isPortalDescribed() || forceDescribePortal) {
+ sendDescribePortal(query, portal);
+ }
+ }
+
+ sendExecute(query, portal, rows);
+ }
+
+ // Sends a simple-protocol 'Q' message with parameter values already substituted
+ // into the query text. The backend answers with RowDescription/DataRow/
+ // CommandComplete and a final ReadyForQuery; the queues registered here let
+ // processResults() match those responses back to this query.
+ private void sendSimpleQuery(SimpleQuery query, SimpleParameterList params) throws IOException {
+ String nativeSql = query.toString(params);
+
+ LOGGER.log(Level.FINEST, " FE=> SimpleQuery(query=\"{0}\")", nativeSql);
+ Encoding encoding = pgStream.getEncoding();
+
+ byte[] encoded = encoding.encode(nativeSql);
+ pgStream.sendChar('Q');
+ // message length = payload + 4 (the length word itself) + 1 (NUL terminator)
+ pgStream.sendInteger4(encoded.length + 4 + 1);
+ pgStream.send(encoded);
+ pgStream.sendChar(0);
+ pgStream.flush();
+ pendingExecuteQueue.add(new ExecuteRequest(query, null, true));
+ pendingDescribePortalQueue.add(query);
+ }
+
+ // Associates a server-side prepared-statement name with its SimpleQuery via a
+ // PhantomReference, so that once the query object becomes unreachable a
+ // Close(statement) can be issued by processDeadParsedQueries().
+ // A null statementName means the unnamed statement, which needs no cleanup.
+ private void registerParsedQuery(SimpleQuery query, String statementName) {
+ if (statementName == null) {
+ return;
+ }
+
+ // NOTE(review): the generic argument was lost in transit (raw PhantomReference);
+ // restored to PhantomReference<SimpleQuery> to match the reference queue's type.
+ PhantomReference<SimpleQuery> cleanupRef =
+ new PhantomReference<>(query, parsedQueryCleanupQueue);
+ parsedQueryMap.put(cleanupRef, statementName);
+ query.setCleanupRef(cleanupRef);
+ }
+
+ // Closes server-side prepared statements whose owning SimpleQuery has been
+ // garbage collected, as reported through parsedQueryCleanupQueue.
+ private void processDeadParsedQueries() throws IOException {
+ // Restored wildcard: "Reference extends SimpleQuery>" was invalid Java,
+ // the "<?" token had been stripped by markup processing.
+ Reference<? extends SimpleQuery> deadQuery;
+ while ((deadQuery = parsedQueryCleanupQueue.poll()) != null) {
+ String statementName = parsedQueryMap.remove(deadQuery);
+ sendCloseStatement(statementName);
+ deadQuery.clear();
+ }
+ }
+
+ // Tracks a named portal with a PhantomReference so a Close(portal) can be
+ // issued by processDeadPortals() once the Portal object is garbage collected.
+ private void registerOpenPortal(Portal portal) {
+ if (portal == UNNAMED_PORTAL) {
+ return; // Using the unnamed portal.
+ }
+
+ String portalName = portal.getPortalName();
+ // Restored generic argument (was raw PhantomReference after markup stripping).
+ PhantomReference<Portal> cleanupRef =
+ new PhantomReference<>(portal, openPortalCleanupQueue);
+ openPortalMap.put(cleanupRef, portalName);
+ portal.setCleanupRef(cleanupRef);
+ }
+
+ // Closes named portals whose Portal object has been garbage collected,
+ // as reported through openPortalCleanupQueue.
+ private void processDeadPortals() throws IOException {
+ // Restored wildcard: "Reference extends Portal>" was invalid Java,
+ // the "<?" token had been stripped by markup processing.
+ Reference<? extends Portal> deadPortal;
+ while ((deadPortal = openPortalCleanupQueue.poll()) != null) {
+ String portalName = openPortalMap.remove(deadPortal);
+ sendClosePortal(portalName);
+ deadPortal.clear();
+ }
+ }
+
+ // Convenience overload: process backend results with adaptive fetch disabled.
+ protected void processResults(ResultHandler handler, int flags) throws IOException {
+ processResults(handler, flags, false);
+ }
+
+ // Core message pump: reads backend messages until the ReadyForQuery ('Z')
+ // response to our Sync, dispatching rows, command statuses, errors, notices
+ // and COPY sub-protocol messages to the supplied ResultHandler.
+ // adaptiveFetch: when true, observed max row sizes are fed back into
+ // adaptiveFetchCache to tune fetch sizes of open portals.
+ // Only change vs. received text: restored generic arguments that had been
+ // stripped (List<Tuple> / new ArrayList<>()); logic is untouched.
+ protected void processResults(ResultHandler handler, int flags, boolean adaptiveFetch)
+ throws IOException {
+ boolean noResults = (flags & QueryExecutor.QUERY_NO_RESULTS) != 0;
+ boolean bothRowsAndStatus = (flags & QueryExecutor.QUERY_BOTH_ROWS_AND_STATUS) != 0;
+
+ List<Tuple> tuples = null;
+
+ int c;
+ boolean endQuery = false;
+
+ // At the end of a command execution we have the CommandComplete
+ // message to tell us we're done, but with a describeOnly command
+ // we have no real flag to let us know we're done. We've got to
+ // look for the next RowDescription or NoData message and return
+ // from there.
+ boolean doneAfterRowDescNoData = false;
+
+ while (!endQuery) {
+ c = pgStream.receiveChar();
+ switch (c) {
+ case 'A': // Asynchronous Notify
+ receiveAsyncNotify();
+ break;
+
+ case '1': // Parse Complete (response to Parse)
+ pgStream.receiveInteger4(); // len, discarded
+
+ SimpleQuery parsedQuery = pendingParseQueue.removeFirst();
+ String parsedStatementName = parsedQuery.getStatementName();
+
+ LOGGER.log(Level.FINEST, " <=BE ParseComplete [{0}]", parsedStatementName);
+
+ break;
+
+ case 't': { // ParameterDescription
+ pgStream.receiveInteger4(); // len, discarded
+
+ LOGGER.log(Level.FINEST, " <=BE ParameterDescription");
+
+ DescribeRequest describeData = pendingDescribeStatementQueue.getFirst();
+ SimpleQuery query = describeData.query;
+ SimpleParameterList params = describeData.parameterList;
+ boolean describeOnly = describeData.describeOnly;
+ // This might differ from query.getStatementName if the query was re-prepared
+ String origStatementName = describeData.statementName;
+
+ int numParams = pgStream.receiveInteger2();
+
+ for (int i = 1; i <= numParams; i++) {
+ int typeOid = pgStream.receiveInteger4();
+ params.setResolvedType(i, typeOid);
+ }
+
+ // Since we can issue multiple Parse and DescribeStatement
+ // messages in a single network trip, we need to make
+ // sure the describe results we requested are still
+ // applicable to the latest parsed query.
+ //
+ if ((origStatementName == null && query.getStatementName() == null)
+ || (origStatementName != null
+ && origStatementName.equals(query.getStatementName()))) {
+ query.setPrepareTypes(params.getTypeOIDs());
+ }
+
+ if (describeOnly) {
+ doneAfterRowDescNoData = true;
+ } else {
+ pendingDescribeStatementQueue.removeFirst();
+ }
+ break;
+ }
+
+ case '2': // Bind Complete (response to Bind)
+ pgStream.receiveInteger4(); // len, discarded
+
+ Portal boundPortal = pendingBindQueue.removeFirst();
+ LOGGER.log(Level.FINEST, " <=BE BindComplete [{0}]", boundPortal);
+
+ registerOpenPortal(boundPortal);
+ break;
+
+ case '3': // Close Complete (response to Close)
+ pgStream.receiveInteger4(); // len, discarded
+ LOGGER.log(Level.FINEST, " <=BE CloseComplete");
+ break;
+
+ case 'n': // No Data (response to Describe)
+ pgStream.receiveInteger4(); // len, discarded
+ LOGGER.log(Level.FINEST, " <=BE NoData");
+
+ pendingDescribePortalQueue.removeFirst();
+
+ if (doneAfterRowDescNoData) {
+ DescribeRequest describeData = pendingDescribeStatementQueue.removeFirst();
+ SimpleQuery currentQuery = describeData.query;
+
+ Field[] fields = currentQuery.getFields();
+
+ if (fields != null) { // There was a resultset.
+ tuples = new ArrayList<>();
+ handler.handleResultRows(currentQuery, fields, tuples, null);
+ tuples = null;
+ }
+ }
+ break;
+
+ case 's': { // Portal Suspended (end of Execute)
+ // nb: this appears *instead* of CommandStatus.
+ // Must be a SELECT if we suspended, so don't worry about it.
+
+ pgStream.receiveInteger4(); // len, discarded
+ LOGGER.log(Level.FINEST, " <=BE PortalSuspended");
+
+ ExecuteRequest executeData = pendingExecuteQueue.removeFirst();
+ SimpleQuery currentQuery = executeData.query;
+ Portal currentPortal = executeData.portal;
+
+ if (currentPortal != null) {
+ // Existence of portal defines if query was using fetching.
+ adaptiveFetchCache
+ .updateQueryFetchSize(adaptiveFetch, currentQuery, pgStream.getMaxRowSizeBytes());
+ }
+ pgStream.clearMaxRowSizeBytes();
+
+ Field[] fields = currentQuery.getFields();
+ if (fields != null && tuples == null) {
+ // When no results expected, pretend an empty resultset was returned
+ // Not sure if new ArrayList can be always replaced with emptyList
+ tuples = noResults ? Collections.emptyList() : new ArrayList<>();
+ }
+
+ if (fields != null && tuples != null) {
+ handler.handleResultRows(currentQuery, fields, tuples, currentPortal);
+ }
+ tuples = null;
+ break;
+ }
+
+ case 'C': { // Command Status (end of Execute)
+ // Handle status.
+ String status = receiveCommandStatus();
+ if (isFlushCacheOnDeallocate()
+ && (status.startsWith("DEALLOCATE ALL") || status.startsWith("DISCARD ALL"))) {
+ deallocateEpoch++;
+ }
+
+ doneAfterRowDescNoData = false;
+
+ ExecuteRequest executeData = pendingExecuteQueue.peekFirst();
+ SimpleQuery currentQuery = executeData.query;
+ Portal currentPortal = executeData.portal;
+
+ if (currentPortal != null) {
+ // Existence of portal defines if query was using fetching.
+
+ // Command executed, adaptive fetch size can be removed for this query, max row size can be cleared
+ adaptiveFetchCache.removeQuery(adaptiveFetch, currentQuery);
+ // Update to change fetch size for other fetch portals of this query
+ adaptiveFetchCache
+ .updateQueryFetchSize(adaptiveFetch, currentQuery, pgStream.getMaxRowSizeBytes());
+ }
+ pgStream.clearMaxRowSizeBytes();
+
+ if (status.startsWith("SET")) {
+ String nativeSql = currentQuery.getNativeQuery().nativeSql;
+ // Scan only the first 1024 characters to
+ // avoid big overhead for long queries.
+ if (nativeSql.lastIndexOf("search_path", 1024) != -1
+ && !nativeSql.equals(lastSetSearchPathQuery)) {
+ // Search path was changed, invalidate prepared statement cache
+ lastSetSearchPathQuery = nativeSql;
+ deallocateEpoch++;
+ }
+ }
+
+ if (!executeData.asSimple) {
+ pendingExecuteQueue.removeFirst();
+ } else {
+ // For simple 'Q' queries, executeQueue is cleared via ReadyForQuery message
+ }
+
+ // we want to make sure we do not add any results from these queries to the result set
+ if (currentQuery == autoSaveQuery
+ || currentQuery == releaseAutoSave) {
+ // ignore "SAVEPOINT" or RELEASE SAVEPOINT status from autosave query
+ break;
+ }
+
+ Field[] fields = currentQuery.getFields();
+ if (fields != null && tuples == null) {
+ // When no results expected, pretend an empty resultset was returned
+ // Not sure if new ArrayList can be always replaced with emptyList
+ tuples = noResults ? Collections.emptyList() : new ArrayList<>();
+ }
+
+ // If we received tuples we must know the structure of the
+ // resultset, otherwise we won't be able to fetch columns
+ // from it, etc, later.
+ if (fields == null && tuples != null) {
+ throw new IllegalStateException(
+ "Received resultset tuples, but no field structure for them");
+ }
+
+ if (fields != null && tuples != null) {
+ // There was a resultset.
+ handler.handleResultRows(currentQuery, fields, tuples, null);
+ tuples = null;
+
+ if (bothRowsAndStatus) {
+ interpretCommandStatus(status, handler);
+ }
+ } else {
+ interpretCommandStatus(status, handler);
+ }
+
+ if (executeData.asSimple) {
+ // Simple queries might return several resultsets, thus we clear
+ // fields, so queries like "select 1;update; select2" will properly
+ // identify that "update" did not return any results
+ currentQuery.setFields(null);
+ }
+
+ if (currentPortal != null) {
+ currentPortal.close();
+ }
+ break;
+ }
+
+ case 'D': // Data Transfer (ongoing Execute response)
+ Tuple tuple = null;
+ try {
+ tuple = pgStream.receiveTupleV3();
+ } catch (OutOfMemoryError oome) {
+ if (!noResults) {
+ handler.handleError(
+ new PSQLException(GT.tr("Ran out of memory retrieving query results."),
+ PSQLState.OUT_OF_MEMORY, oome));
+ }
+ } catch (SQLException e) {
+ handler.handleError(e);
+ }
+ if (!noResults) {
+ if (tuples == null) {
+ tuples = new ArrayList<>();
+ }
+ if (tuple != null) {
+ tuples.add(tuple);
+ }
+ }
+
+ if (LOGGER.isLoggable(Level.FINEST)) {
+ int length;
+ if (tuple == null) {
+ length = -1;
+ } else {
+ length = tuple.length();
+ }
+ LOGGER.log(Level.FINEST, " <=BE DataRow(len={0})", length);
+ }
+
+ break;
+
+ case 'E':
+ // Error Response (response to pretty much everything; backend then skips until Sync)
+ SQLException error = receiveErrorResponse();
+ handler.handleError(error);
+ if (willHealViaReparse(error)) {
+ // prepared statement ... is not valid kind of error
+ // Technically speaking, the error is unexpected, thus we invalidate other
+ // server-prepared statements just in case.
+ deallocateEpoch++;
+ if (LOGGER.isLoggable(Level.FINEST)) {
+ LOGGER.log(Level.FINEST, " FE: received {0}, will invalidate statements. deallocateEpoch is now {1}",
+ new Object[]{error.getSQLState(), deallocateEpoch});
+ }
+ }
+ // keep processing
+ break;
+
+ case 'I': { // Empty Query (end of Execute)
+ pgStream.receiveInteger4();
+
+ LOGGER.log(Level.FINEST, " <=BE EmptyQuery");
+
+ ExecuteRequest executeData = pendingExecuteQueue.removeFirst();
+ Portal currentPortal = executeData.portal;
+ handler.handleCommandStatus("EMPTY", 0, 0);
+ if (currentPortal != null) {
+ currentPortal.close();
+ }
+ break;
+ }
+
+ case 'N': // Notice Response
+ SQLWarning warning = receiveNoticeResponse();
+ handler.handleWarning(warning);
+ break;
+
+ case 'S': // Parameter Status
+ try {
+ receiveParameterStatus();
+ } catch (SQLException e) {
+ handler.handleError(e);
+ endQuery = true;
+ }
+ break;
+
+ case 'T': // Row Description (response to Describe)
+ Field[] fields = receiveFields();
+ tuples = new ArrayList<>();
+
+ SimpleQuery query = pendingDescribePortalQueue.peekFirst();
+ if (!pendingExecuteQueue.isEmpty()
+ && !pendingExecuteQueue.peekFirst().asSimple) {
+ pendingDescribePortalQueue.removeFirst();
+ }
+ query.setFields(fields);
+
+ if (doneAfterRowDescNoData) {
+ DescribeRequest describeData = pendingDescribeStatementQueue.removeFirst();
+ SimpleQuery currentQuery = describeData.query;
+ currentQuery.setFields(fields);
+
+ handler.handleResultRows(currentQuery, fields, tuples, null);
+ tuples = null;
+ }
+ break;
+
+ case 'Z': // Ready For Query (eventual response to Sync)
+ receiveRFQ();
+ if (!pendingExecuteQueue.isEmpty()
+ && pendingExecuteQueue.peekFirst().asSimple) {
+ tuples = null;
+ pgStream.clearResultBufferCount();
+
+ ExecuteRequest executeRequest = pendingExecuteQueue.removeFirst();
+ // Simple queries might return several resultsets, thus we clear
+ // fields, so queries like "select 1;update; select2" will properly
+ // identify that "update" did not return any results
+ executeRequest.query.setFields(null);
+
+ pendingDescribePortalQueue.removeFirst();
+ if (!pendingExecuteQueue.isEmpty()) {
+ if (getTransactionState() == TransactionState.IDLE) {
+ handler.secureProgress();
+ }
+ // process subsequent results (e.g. for cases like batched execution of simple 'Q' queries)
+ break;
+ }
+ }
+ endQuery = true;
+
+ // Reset the statement name of Parses that failed.
+ while (!pendingParseQueue.isEmpty()) {
+ SimpleQuery failedQuery = pendingParseQueue.removeFirst();
+ failedQuery.unprepare();
+ }
+
+ pendingParseQueue.clear(); // No more ParseComplete messages expected.
+ // Pending "describe" requests might be there in case of error
+ // If that is the case, reset "described" status, so the statement is properly
+ // described on next execution
+ while (!pendingDescribeStatementQueue.isEmpty()) {
+ DescribeRequest request = pendingDescribeStatementQueue.removeFirst();
+ LOGGER.log(Level.FINEST, " FE marking setStatementDescribed(false) for query {0}", request.query);
+ request.query.setStatementDescribed(false);
+ }
+ while (!pendingDescribePortalQueue.isEmpty()) {
+ SimpleQuery describePortalQuery = pendingDescribePortalQueue.removeFirst();
+ LOGGER.log(Level.FINEST, " FE marking setPortalDescribed(false) for query {0}", describePortalQuery);
+ describePortalQuery.setPortalDescribed(false);
+ }
+ pendingBindQueue.clear(); // No more BindComplete messages expected.
+ pendingExecuteQueue.clear(); // No more query executions expected.
+ break;
+
+ case 'G': // CopyInResponse
+ LOGGER.log(Level.FINEST, " <=BE CopyInResponse");
+ LOGGER.log(Level.FINEST, " FE=> CopyFail");
+
+ // COPY sub-protocol is not implemented yet
+ // We'll send a CopyFail message for COPY FROM STDIN so that
+ // server does not wait for the data.
+
+ byte[] buf = "COPY commands are only supported using the CopyManager API.".getBytes(StandardCharsets.US_ASCII);
+ pgStream.sendChar('f');
+ pgStream.sendInteger4(buf.length + 4 + 1);
+ pgStream.send(buf);
+ pgStream.sendChar(0);
+ pgStream.flush();
+ sendSync(); // send sync message
+ skipMessage(); // skip the response message
+ break;
+
+ case 'H': // CopyOutResponse
+ LOGGER.log(Level.FINEST, " <=BE CopyOutResponse");
+
+ skipMessage();
+ // In case of CopyOutResponse, we cannot abort data transfer,
+ // so just throw an error and ignore CopyData messages
+ handler.handleError(
+ new PSQLException(GT.tr("COPY commands are only supported using the CopyManager API."),
+ PSQLState.NOT_IMPLEMENTED));
+ break;
+
+ case 'c': // CopyDone
+ skipMessage();
+ LOGGER.log(Level.FINEST, " <=BE CopyDone");
+ break;
+
+ case 'd': // CopyData
+ skipMessage();
+ LOGGER.log(Level.FINEST, " <=BE CopyData");
+ break;
+
+ default:
+ throw new IOException("Unexpected packet type: " + c);
+ }
+
+ }
+ }
+
+ /**
+ * Ignore the response message by reading the message length and skipping over those bytes in the
+ * communication stream.
+ */
+ private void skipMessage() throws IOException {
+ int len = pgStream.receiveInteger4();
+
+ assert len >= 4 : "Length from skip message must be at least 4 ";
+
+ // skip len-4 bytes (the length word includes its own 4 bytes)
+ pgStream.skip(len - 4);
+ }
+
+ // Fetches the next batch of rows from a suspended portal (forward cursor).
+ // A wrapper ResultHandler turns a bare CommandStatus (no rows left) into an
+ // empty resultset so callers always see handleResultRows.
+ @Override
+ public void fetch(ResultCursor cursor, ResultHandler handler, int fetchSize,
+ boolean adaptiveFetch) throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ waitOnLock();
+ final Portal portal = (Portal) cursor;
+
+ // Insert a ResultHandler that turns bare command statuses into empty datasets
+ // (if the fetch returns no rows, we see just a CommandStatus..)
+ final ResultHandler delegateHandler = handler;
+ final SimpleQuery query = portal.getQuery();
+ handler = new ResultHandlerDelegate(delegateHandler) {
+ @Override
+ public void handleCommandStatus(String status, long updateCount, long insertOID) {
+ handleResultRows(query, NO_FIELDS, new ArrayList<>(), null);
+ }
+ };
+
+ // Now actually run it.
+
+ try {
+ // Opportunistically close server objects whose owners were GC'd.
+ processDeadParsedQueries();
+ processDeadPortals();
+
+ sendExecute(query, portal, fetchSize);
+ sendSync();
+
+ processResults(handler, 0, adaptiveFetch);
+ estimatedReceiveBufferBytes = 0;
+ } catch (IOException e) {
+ abort();
+ handler.handleError(
+ new PSQLException(GT.tr("An I/O error occurred while sending to the backend."),
+ PSQLState.CONNECTION_FAILURE, e));
+ }
+
+ handler.handleCompletion();
+ }
+ }
+
+ // Returns the adaptive fetch size computed for the cursor's query,
+ // or -1 if the cursor is not a Portal or carries no query.
+ @Override
+ public int getAdaptiveFetchSize(boolean adaptiveFetch, ResultCursor cursor) {
+ if (cursor instanceof Portal) {
+ Query query = ((Portal) cursor).getQuery();
+ if (Objects.nonNull(query)) {
+ return adaptiveFetchCache
+ .getFetchSizeForQuery(adaptiveFetch, query);
+ }
+ }
+ return -1;
+ }
+
+ // Whether adaptive fetch sizing is globally enabled for this connection.
+ @Override
+ public boolean getAdaptiveFetch() {
+ return this.adaptiveFetchCache.getAdaptiveFetch();
+ }
+
+ // Enables or disables adaptive fetch sizing for this connection.
+ @Override
+ public void setAdaptiveFetch(boolean adaptiveFetch) {
+ this.adaptiveFetchCache.setAdaptiveFetch(adaptiveFetch);
+ }
+
+ // Registers the cursor's query with the adaptive fetch cache; no-op for
+ // non-Portal cursors or portals without a query.
+ @Override
+ public void addQueryToAdaptiveFetchCache(boolean adaptiveFetch, ResultCursor cursor) {
+ if (cursor instanceof Portal) {
+ Query query = ((Portal) cursor).getQuery();
+ if (Objects.nonNull(query)) {
+ adaptiveFetchCache.addNewQuery(adaptiveFetch, query);
+ }
+ }
+ }
+
+ // Removes the cursor's query from the adaptive fetch cache; no-op for
+ // non-Portal cursors or portals without a query.
+ @Override
+ public void removeQueryFromAdaptiveFetchCache(boolean adaptiveFetch, ResultCursor cursor) {
+ if (cursor instanceof Portal) {
+ Query query = ((Portal) cursor).getQuery();
+ if (Objects.nonNull(query)) {
+ adaptiveFetchCache.removeQuery(adaptiveFetch, query);
+ }
+ }
+ }
+
+ /*
+ * Receive the field descriptions from the back end.
+ */
+ private Field[] receiveFields() throws IOException {
+ pgStream.receiveInteger4(); // MESSAGE SIZE
+ int size = pgStream.receiveInteger2();
+ Field[] fields = new Field[size];
+
+ if (LOGGER.isLoggable(Level.FINEST)) {
+ LOGGER.log(Level.FINEST, " <=BE RowDescription({0})", size);
+ }
+
+ // Per-column layout of the RowDescription message:
+ // name, table OID, column attnum, type OID, typlen, typmod, format code.
+ for (int i = 0; i < fields.length; i++) {
+ String columnLabel = pgStream.receiveCanonicalString();
+ int tableOid = pgStream.receiveInteger4();
+ short positionInTable = (short) pgStream.receiveInteger2();
+ int typeOid = pgStream.receiveInteger4();
+ int typeLength = pgStream.receiveInteger2();
+ int typeModifier = pgStream.receiveInteger4();
+ int formatType = pgStream.receiveInteger2();
+ fields[i] = new Field(columnLabel,
+ typeOid, typeLength, typeModifier, tableOid, positionInTable);
+ fields[i].setFormat(formatType);
+
+ LOGGER.log(Level.FINEST, " {0}", fields[i]);
+ }
+
+ return fields;
+ }
+
+ // Reads a NotificationResponse ('A') message (LISTEN/NOTIFY) and queues it
+ // for retrieval via getNotifications().
+ private void receiveAsyncNotify() throws IOException {
+ int len = pgStream.receiveInteger4(); // MESSAGE SIZE
+ assert len > 4 : "Length for AsyncNotify must be at least 4";
+
+ int pid = pgStream.receiveInteger4();
+ String msg = pgStream.receiveCanonicalString();
+ String param = pgStream.receiveString();
+ addNotification(new Notification(msg, pid, param));
+
+ if (LOGGER.isLoggable(Level.FINEST)) {
+ LOGGER.log(Level.FINEST, " <=BE AsyncNotify({0},{1},{2})", new Object[]{pid, msg, param});
+ }
+ }
+
+ // Reads an ErrorResponse ('E') message and converts it into a PSQLException.
+ // Also records/chains the first error of a failed transaction so later errors
+ // carry the root cause.
+ private SQLException receiveErrorResponse() throws IOException {
+ // it's possible to get more than one error message for a query
+ // see libpq comments wrt backend closing a connection
+ // so, append messages to a string buffer and keep processing
+ // check at the bottom to see if we need to throw an exception
+
+ int elen = pgStream.receiveInteger4();
+ assert elen > 4 : "Error response length must be greater than 4";
+
+ EncodingPredictor.DecodeResult totalMessage = pgStream.receiveErrorString(elen - 4);
+ ServerErrorMessage errorMsg = new ServerErrorMessage(totalMessage);
+
+ if (LOGGER.isLoggable(Level.FINEST)) {
+ LOGGER.log(Level.FINEST, " <=BE ErrorMessage({0})", errorMsg.toString());
+ }
+
+ PSQLException error = new PSQLException(errorMsg, this.logServerErrorDetail);
+ if (transactionFailCause == null) {
+ transactionFailCause = error;
+ } else {
+ error.initCause(transactionFailCause);
+ }
+ return error;
+ }
+
+ // Reads a NoticeResponse ('N') message and wraps it as a SQLWarning.
+ private SQLWarning receiveNoticeResponse() throws IOException {
+ int nlen = pgStream.receiveInteger4();
+ assert nlen > 4 : "Notice Response length must be greater than 4";
+
+ ServerErrorMessage warnMsg = new ServerErrorMessage(pgStream.receiveString(nlen - 4));
+
+ if (LOGGER.isLoggable(Level.FINEST)) {
+ LOGGER.log(Level.FINEST, " <=BE NoticeResponse({0})", warnMsg.toString());
+ }
+
+ return new PSQLWarning(warnMsg);
+ }
+
+ // Reads the command tag from a CommandComplete ('C') message,
+ // e.g. "INSERT 0 1" or "SELECT 42".
+ private String receiveCommandStatus() throws IOException {
+ // TODO: better handle the msg len
+ int len = pgStream.receiveInteger4();
+ // read len -5 bytes (-4 for len and -1 for trailing \0)
+ String status = pgStream.receiveString(len - 5);
+ // now read and discard the trailing \0
+ pgStream.receiveChar(); // Receive(1) would allocate new byte[1], so avoid it
+
+ LOGGER.log(Level.FINEST, " <=BE CommandStatus({0})", status);
+
+ return status;
+ }
+
+ // Parses a command tag (e.g. "INSERT 0 42") into an affected-row count and
+ // OID, and reports them to the handler; parse failures are reported as errors.
+ private void interpretCommandStatus(String status, ResultHandler handler) {
+ try {
+ commandCompleteParser.parse(status);
+ } catch (SQLException e) {
+ handler.handleError(e);
+ return;
+ }
+ long oid = commandCompleteParser.getOid();
+ long count = commandCompleteParser.getRows();
+
+ handler.handleCommandStatus(status, count, oid);
+ }
+
+ // Reads a ReadyForQuery ('Z') message body: a fixed length of 5
+ // (4-byte length word + 1 status byte) and the transaction status
+ // indicator 'I' (idle), 'T' (in transaction) or 'E' (failed transaction).
+ private void receiveRFQ() throws IOException {
+ if (pgStream.receiveInteger4() != 5) {
+ throw new IOException("unexpected length of ReadyForQuery message");
+ }
+
+ char tStatus = (char) pgStream.receiveChar();
+ if (LOGGER.isLoggable(Level.FINEST)) {
+ LOGGER.log(Level.FINEST, " <=BE ReadyForQuery({0})", tStatus);
+ }
+
+ // Update connection state.
+ switch (tStatus) {
+ case 'I':
+ transactionFailCause = null;
+ setTransactionState(TransactionState.IDLE);
+ break;
+ case 'T':
+ transactionFailCause = null;
+ setTransactionState(TransactionState.OPEN);
+ break;
+ case 'E':
+ setTransactionState(TransactionState.FAILED);
+ break;
+ default:
+ throw new IOException(
+ "unexpected transaction state in ReadyForQuery message: " + (int) tStatus);
+ }
+ }
+
+ // Sends the protocol-level Terminate message when the connection is closed.
+ @Override
+ @SuppressWarnings("deprecation")
+ protected void sendCloseMessage() throws IOException {
+ closeAction.sendCloseMessage(pgStream);
+ }
+
+ // Drains the backend's session-setup messages after authentication:
+ // BackendKeyData, ParameterStatus and notices, until ReadyForQuery.
+ // The loop is bounded (1000 messages) to avoid hanging on a misbehaving
+ // server; exceeding it is treated as a protocol violation.
+ public void readStartupMessages() throws IOException, SQLException {
+ for (int i = 0; i < 1000; i++) {
+ int beresp = pgStream.receiveChar();
+ switch (beresp) {
+ case 'Z':
+ receiveRFQ();
+ // Ready For Query; we're done.
+ return;
+
+ case 'K':
+ // BackendKeyData
+ int msgLen = pgStream.receiveInteger4();
+ if (msgLen != 12) {
+ throw new PSQLException(GT.tr("Protocol error. Session setup failed."),
+ PSQLState.PROTOCOL_VIOLATION);
+ }
+
+ int pid = pgStream.receiveInteger4();
+ int ckey = pgStream.receiveInteger4();
+
+ if (LOGGER.isLoggable(Level.FINEST)) {
+ LOGGER.log(Level.FINEST, " <=BE BackendKeyData(pid={0},ckey={1})", new Object[]{pid, ckey});
+ }
+
+ setBackendKeyData(pid, ckey);
+ break;
+
+ case 'E':
+ // Error
+ throw receiveErrorResponse();
+
+ case 'N':
+ // Warning
+ addWarning(receiveNoticeResponse());
+ break;
+
+ case 'S':
+ // ParameterStatus
+ receiveParameterStatus();
+
+ break;
+
+ default:
+ if (LOGGER.isLoggable(Level.FINEST)) {
+ LOGGER.log(Level.FINEST, " invalid message type={0}", (char) beresp);
+ }
+ throw new PSQLException(GT.tr("Protocol error. Session setup failed."),
+ PSQLState.PROTOCOL_VIOLATION);
+ }
+ }
throw new PSQLException(GT.tr("Protocol error. Session setup failed."),
- PSQLState.PROTOCOL_VIOLATION);
- }
+ PSQLState.PROTOCOL_VIOLATION);
}
- }
- public void setTimeZone(TimeZone timeZone) {
- this.timeZone = timeZone;
- }
+ // Reads a ParameterStatus ('S') message and reacts to parameters the driver
+ // depends on (client_encoding, DateStyle, standard_conforming_strings,
+ // TimeZone, application_name, server_version[_num], integer_datetimes),
+ // closing the connection when the server reports a value the driver cannot
+ // operate with.
+ public void receiveParameterStatus() throws IOException, SQLException {
+ // ParameterStatus
+ pgStream.receiveInteger4(); // MESSAGE SIZE
+ final String name = pgStream.receiveCanonicalStringIfPresent();
+ final String value = pgStream.receiveCanonicalStringIfPresent();
- @Override
- public TimeZone getTimeZone() {
- return timeZone;
- }
+ if (LOGGER.isLoggable(Level.FINEST)) {
+ LOGGER.log(Level.FINEST, " <=BE ParameterStatus({0} = {1})", new Object[]{name, value});
+ }
- public void setApplicationName(String applicationName) {
- this.applicationName = applicationName;
- }
+ // if the name is empty, there is nothing to do
+ if (name.isEmpty()) {
+ return;
+ }
- @Override
- public String getApplicationName() {
- if (applicationName == null) {
- return "";
+ // Update client-visible parameter status map for getParameterStatuses()
+ onParameterStatus(name, value);
+
+ if ("client_encoding".equals(name)) {
+ if (allowEncodingChanges) {
+ if (!"UTF8".equalsIgnoreCase(value) && !"UTF-8".equalsIgnoreCase(value)) {
+ LOGGER.log(Level.FINE,
+ "pgjdbc expects client_encoding to be UTF8 for proper operation. Actual encoding is {0}",
+ value);
+ }
+ pgStream.setEncoding(Encoding.getDatabaseEncoding(value));
+ } else if (!"UTF8".equalsIgnoreCase(value) && !"UTF-8".equalsIgnoreCase(value)) {
+ close(); // we're screwed now; we can't trust any subsequent string.
+ throw new PSQLException(GT.tr(
+ "The server''s client_encoding parameter was changed to {0}. The JDBC driver requires client_encoding to be UTF8 for correct operation.",
+ value), PSQLState.CONNECTION_FAILURE);
+
+ }
+ }
+
+ if ("DateStyle".equals(name) && !value.startsWith("ISO")
+ && !value.toUpperCase(Locale.ROOT).startsWith("ISO")) {
+ close(); // we're screwed now; we can't trust any subsequent date.
+ throw new PSQLException(GT.tr(
+ "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation.",
+ value), PSQLState.CONNECTION_FAILURE);
+ }
+
+ if ("standard_conforming_strings".equals(name)) {
+ if ("on".equals(value)) {
+ setStandardConformingStrings(true);
+ } else if ("off".equals(value)) {
+ setStandardConformingStrings(false);
+ } else {
+ close();
+ // we're screwed now; we don't know how to escape string literals
+ throw new PSQLException(GT.tr(
+ "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off.",
+ value), PSQLState.CONNECTION_FAILURE);
+ }
+ return;
+ }
+
+ if ("TimeZone".equals(name)) {
+ setTimeZone(TimestampUtils.parseBackendTimeZone(value));
+ } else if ("application_name".equals(name)) {
+ setApplicationName(value);
+ } else if ("server_version_num".equals(name)) {
+ setServerVersionNum(Integer.parseInt(value));
+ } else if ("server_version".equals(name)) {
+ setServerVersion(value);
+ } else if ("integer_datetimes".equals(name)) {
+ if ("on".equals(value)) {
+ setIntegerDateTimes(true);
+ } else if ("off".equals(value)) {
+ setIntegerDateTimes(false);
+ } else {
+ throw new PSQLException(GT.tr("Protocol error. Session setup failed."),
+ PSQLState.PROTOCOL_VIOLATION);
+ }
+ }
}
- return applicationName;
- }
- @Override
- public ReplicationProtocol getReplicationProtocol() {
- return replicationProtocol;
- }
-
- @Override
- public void addBinaryReceiveOid(int oid) {
- synchronized (useBinaryReceiveForOids) {
- useBinaryReceiveForOids.add(oid);
+ @Override
+ public TimeZone getTimeZone() {
+ return timeZone;
}
- }
- @Override
- public void removeBinaryReceiveOid(int oid) {
- synchronized (useBinaryReceiveForOids) {
- useBinaryReceiveForOids.remove(oid);
+ public void setTimeZone(TimeZone timeZone) {
+ this.timeZone = timeZone;
}
- }
- @Override
- @SuppressWarnings("deprecation")
- public Set extends Integer> getBinaryReceiveOids() {
- // copy the values to prevent ConcurrentModificationException when reader accesses the elements
- synchronized (useBinaryReceiveForOids) {
- return new HashSet<>(useBinaryReceiveForOids);
+ @Override
+ public String getApplicationName() {
+ if (applicationName == null) {
+ return "";
+ }
+ return applicationName;
}
- }
- @Override
- public boolean useBinaryForReceive(int oid) {
- synchronized (useBinaryReceiveForOids) {
- return useBinaryReceiveForOids.contains(oid);
+ public void setApplicationName(String applicationName) {
+ this.applicationName = applicationName;
}
- }
- @Override
- public void setBinaryReceiveOids(Set oids) {
- synchronized (useBinaryReceiveForOids) {
- useBinaryReceiveForOids.clear();
- useBinaryReceiveForOids.addAll(oids);
+ @Override
+ public ReplicationProtocol getReplicationProtocol() {
+ return replicationProtocol;
}
- }
- @Override
- public void addBinarySendOid(int oid) {
- synchronized (useBinarySendForOids) {
- useBinarySendForOids.add(oid);
+ @Override
+ public void addBinaryReceiveOid(int oid) {
+ synchronized (useBinaryReceiveForOids) {
+ useBinaryReceiveForOids.add(oid);
+ }
}
- }
- @Override
- public void removeBinarySendOid(int oid) {
- synchronized (useBinarySendForOids) {
- useBinarySendForOids.remove(oid);
+ @Override
+ public void removeBinaryReceiveOid(int oid) {
+ synchronized (useBinaryReceiveForOids) {
+ useBinaryReceiveForOids.remove(oid);
+ }
}
- }
- @Override
- @SuppressWarnings("deprecation")
- public Set<? extends Integer> getBinarySendOids() {
- // copy the values to prevent ConcurrentModificationException when reader accesses the elements
- synchronized (useBinarySendForOids) {
- return new HashSet<>(useBinarySendForOids);
+ @Override
+ @SuppressWarnings("deprecation")
+ public Set<? extends Integer> getBinaryReceiveOids() {
+ // copy the values to prevent ConcurrentModificationException when reader accesses the elements
+ synchronized (useBinaryReceiveForOids) {
+ return new HashSet<>(useBinaryReceiveForOids);
+ }
}
- }
- @Override
- public boolean useBinaryForSend(int oid) {
- synchronized (useBinarySendForOids) {
- return useBinarySendForOids.contains(oid);
+ @Override
+ public void setBinaryReceiveOids(Set<Integer> oids) {
+ synchronized (useBinaryReceiveForOids) {
+ useBinaryReceiveForOids.clear();
+ useBinaryReceiveForOids.addAll(oids);
+ }
}
- }
- @Override
- public void setBinarySendOids(Set<Integer> oids) {
- synchronized (useBinarySendForOids) {
- useBinarySendForOids.clear();
- useBinarySendForOids.addAll(oids);
+ @Override
+ public boolean useBinaryForReceive(int oid) {
+ synchronized (useBinaryReceiveForOids) {
+ return useBinaryReceiveForOids.contains(oid);
+ }
}
- }
- private void setIntegerDateTimes(boolean state) {
- integerDateTimes = state;
- }
+ @Override
+ public void addBinarySendOid(int oid) {
+ synchronized (useBinarySendForOids) {
+ useBinarySendForOids.add(oid);
+ }
+ }
- @Override
- public boolean getIntegerDateTimes() {
- return integerDateTimes;
- }
+ @Override
+ public void removeBinarySendOid(int oid) {
+ synchronized (useBinarySendForOids) {
+ useBinarySendForOids.remove(oid);
+ }
+ }
- private final Deque<SimpleQuery> pendingParseQueue = new ArrayDeque<>();
- private final Deque<Portal> pendingBindQueue = new ArrayDeque<>();
- private final Deque<ExecuteRequest> pendingExecuteQueue = new ArrayDeque<>();
- private final Deque<DescribeRequest> pendingDescribeStatementQueue =
- new ArrayDeque<>();
- private final Deque<SimpleQuery> pendingDescribePortalQueue = new ArrayDeque<>();
+ @Override
+ @SuppressWarnings("deprecation")
+ public Set<? extends Integer> getBinarySendOids() {
+ // copy the values to prevent ConcurrentModificationException when reader accesses the elements
+ synchronized (useBinarySendForOids) {
+ return new HashSet<>(useBinarySendForOids);
+ }
+ }
- private long nextUniqueID = 1;
- private final boolean allowEncodingChanges;
- private final boolean cleanupSavePoints;
+ @Override
+ public void setBinarySendOids(Set<Integer> oids) {
+ synchronized (useBinarySendForOids) {
+ useBinarySendForOids.clear();
+ useBinarySendForOids.addAll(oids);
+ }
+ }
- /**
- * <p>The estimated server response size since we last consumed the input stream from the server, in
- * bytes.
- *
- * <p>Starts at zero, reset by every Sync message. Mainly used for batches.
- *
- * <p>Used to avoid deadlocks, see MAX_BUFFERED_RECV_BYTES.
- */
- private int estimatedReceiveBufferBytes;
+ @Override
+ public boolean useBinaryForSend(int oid) {
+ synchronized (useBinarySendForOids) {
+ return useBinarySendForOids.contains(oid);
+ }
+ }
- private final SimpleQuery beginTransactionQuery =
- new SimpleQuery(
- new NativeQuery("BEGIN", null, false, SqlCommand.BLANK),
- null, false);
+ @Override
+ public boolean getIntegerDateTimes() {
+ return integerDateTimes;
+ }
- private final SimpleQuery beginReadOnlyTransactionQuery =
- new SimpleQuery(
- new NativeQuery("BEGIN READ ONLY", null, false, SqlCommand.BLANK),
- null, false);
-
- private final SimpleQuery emptyQuery =
- new SimpleQuery(
- new NativeQuery("", null, false,
- SqlCommand.createStatementTypeInfo(SqlCommandType.BLANK)
- ), null, false);
-
- private final SimpleQuery autoSaveQuery =
- new SimpleQuery(
- new NativeQuery("SAVEPOINT PGJDBC_AUTOSAVE", null, false, SqlCommand.BLANK),
- null, false);
-
- private final SimpleQuery releaseAutoSave =
- new SimpleQuery(
- new NativeQuery("RELEASE SAVEPOINT PGJDBC_AUTOSAVE", null, false, SqlCommand.BLANK),
- null, false);
-
- /*
- In autosave mode we use this query to roll back errored transactions
- */
- private final SimpleQuery restoreToAutoSave =
- new SimpleQuery(
- new NativeQuery("ROLLBACK TO SAVEPOINT PGJDBC_AUTOSAVE", null, false, SqlCommand.BLANK),
- null, false);
+ private void setIntegerDateTimes(boolean state) {
+ integerDateTimes = state;
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/SimpleParameterList.java b/pgjdbc/src/main/java/org/postgresql/core/v3/SimpleParameterList.java
index 1e6571f..1a40a2c 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/SimpleParameterList.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/SimpleParameterList.java
@@ -6,6 +6,11 @@
package org.postgresql.core.v3;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.charset.StandardCharsets;
+import java.sql.SQLException;
+import java.util.Arrays;
import org.postgresql.core.Oid;
import org.postgresql.core.PGStream;
import org.postgresql.core.ParameterList;
@@ -20,604 +25,596 @@ import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
import org.postgresql.util.StreamWrapper;
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.charset.StandardCharsets;
-import java.sql.SQLException;
-import java.util.Arrays;
-
/**
* Parameter list for a single-statement V3 query.
*
* @author Oliver Jowett (oliver@opencloud.com)
*/
-class SimpleParameterList implements V3ParameterList {
+public class SimpleParameterList implements V3ParameterList {
- private static final byte IN = 1;
- private static final byte OUT = 2;
- private static final byte INOUT = IN | OUT;
+ private static final byte IN = 1;
+ private static final byte OUT = 2;
+ private static final byte INOUT = IN | OUT;
- private static final byte TEXT = 0;
- private static final byte BINARY = 4;
+ private static final byte TEXT = 0;
+ private static final byte BINARY = 4;
+ /**
+ * Marker object representing NULL; this distinguishes "parameter never set" from "parameter set
+ * to null".
+ */
+ private static final Object NULL_OBJECT = new Object();
+ private final Object[] paramValues;
+ private final int[] paramTypes;
+ private final byte[] flags;
+ private final byte[][] encoded;
+ private final TypeTransferModeRegistry transferModeRegistry;
+ private int pos;
- SimpleParameterList(int paramCount, TypeTransferModeRegistry transferModeRegistry) {
- this.paramValues = new Object[paramCount];
- this.paramTypes = new int[paramCount];
- this.encoded = new byte[paramCount][];
- this.flags = new byte[paramCount];
- this.transferModeRegistry = transferModeRegistry;
- }
-
- @Override
- public void registerOutParameter(int index, int sqlType) throws SQLException {
- if (index < 1 || index > paramValues.length) {
- throw new PSQLException(
- GT.tr("The column index is out of range: {0}, number of columns: {1}.",
- index, paramValues.length),
- PSQLState.INVALID_PARAMETER_VALUE);
+ public SimpleParameterList(int paramCount, TypeTransferModeRegistry transferModeRegistry) {
+ this.paramValues = new Object[paramCount];
+ this.paramTypes = new int[paramCount];
+ this.encoded = new byte[paramCount][];
+ this.flags = new byte[paramCount];
+ this.transferModeRegistry = transferModeRegistry;
}
- flags[index - 1] |= OUT;
- }
-
- private void bind(int index, Object value, int oid, byte binary) throws SQLException {
- if (index < 1 || index > paramValues.length) {
- throw new PSQLException(
- GT.tr("The column index is out of range: {0}, number of columns: {1}.",
- index, paramValues.length),
- PSQLState.INVALID_PARAMETER_VALUE);
+ /**
+ * <p>Escapes a given text value as a literal, wraps it in single quotes, casts it to the
+ * to the given data type, and finally wraps the whole thing in parentheses.
+ *
+ * <p>For example, "123" and "int4" becomes "('123'::int)"
+ *
+ * <p>The additional parentheses is added to ensure that the surrounding text of where the
+ * parameter value is entered does modify the interpretation of the value.
+ *
+ * <p>For example if our input SQL is: SELECT ?b
+ *
+ * <p>Using a parameter value of '{}' and type of json we'd get:
+ **/
+ private static String quoteAndCast(String text, String type, boolean standardConformingStrings) {
+ StringBuilder sb = new StringBuilder((text.length() + 10) / 10 * 11); // Add 10% for escaping.
+ sb.append("('");
+ try {
+ Utils.escapeLiteral(sb, text, standardConformingStrings);
+ } catch (SQLException e) {
+ // This should only happen if we have an embedded null
+ // and there's not much we can do if we do hit one.
+ //
+ // To force a server side failure, we deliberately include
+ // a zero byte character in the literal to force the server
+ // to reject the command.
+ sb.append('\u0000');
+ }
+ sb.append("'");
+ if (type != null) {
+ sb.append("::");
+ sb.append(type);
+ }
+ sb.append(")");
+ return sb.toString();
}
- --index;
+ private static void streamBytea(PGStream pgStream, StreamWrapper wrapper) throws IOException {
+ byte[] rawData = wrapper.getBytes();
+ if (rawData != null) {
+ pgStream.send(rawData, wrapper.getOffset(), wrapper.getLength());
+ return;
+ }
- encoded[index] = null;
- paramValues[index] = value;
- flags[index] = (byte) (direction(index) | IN | binary);
-
- // If we are setting something to an UNSPECIFIED NULL, don't overwrite
- // our existing type for it. We don't need the correct type info to
- // send this value, and we don't want to overwrite and require a
- // reparse.
- if (oid == Oid.UNSPECIFIED && paramTypes[index] != Oid.UNSPECIFIED && value == NULL_OBJECT) {
- return;
+ pgStream.sendStream(wrapper.getStream(), wrapper.getLength());
}
- paramTypes[index] = oid;
- pos = index + 1;
- }
-
- @Override
- public int getParameterCount() {
- return paramValues.length;
- }
-
- @Override
- public int getOutParameterCount() {
- int count = 0;
- for (int i = 0; i < paramTypes.length; i++) {
- if ((direction(i) & OUT) == OUT) {
- count++;
- }
- }
- // Every function has at least one output.
- if (count == 0) {
- count = 1;
- }
- return count;
-
- }
-
- @Override
- public int getInParameterCount() {
- int count = 0;
- for (int i = 0; i < paramTypes.length; i++) {
- if (direction(i) != OUT) {
- count++;
- }
- }
- return count;
- }
-
- @Override
- public void setIntParameter(int index, int value) throws SQLException {
- byte[] data = new byte[4];
- ByteConverter.int4(data, 0, value);
- bind(index, data, Oid.INT4, BINARY);
- }
-
- @Override
- public void setLiteralParameter(int index, String value, int oid) throws SQLException {
- bind(index, value, oid, TEXT);
- }
-
- @Override
- public void setStringParameter(int index, String value, int oid) throws SQLException {
- bind(index, value, oid, TEXT);
- }
-
- @Override
- public void setBinaryParameter(int index, byte[] value, int oid) throws SQLException {
- bind(index, value, oid, BINARY);
- }
-
- @Override
- public void setBytea(int index, byte[] data, int offset, int length) throws SQLException {
- bind(index, new StreamWrapper(data, offset, length), Oid.BYTEA, BINARY);
- }
-
- @Override
- public void setBytea(int index, InputStream stream, int length) throws SQLException {
- bind(index, new StreamWrapper(stream, length), Oid.BYTEA, BINARY);
- }
-
- @Override
- public void setBytea(int index, InputStream stream) throws SQLException {
- bind(index, new StreamWrapper(stream), Oid.BYTEA, BINARY);
- }
-
- @Override
- public void setBytea(int index, ByteStreamWriter writer) throws SQLException {
- bind(index, writer, Oid.BYTEA, BINARY);
- }
-
- @Override
- public void setText(int index, InputStream stream) throws SQLException {
- bind(index, new StreamWrapper(stream), Oid.TEXT, TEXT);
- }
-
- @Override
- public void setNull(int index, int oid) throws SQLException {
-
- byte binaryTransfer = TEXT;
-
- if (transferModeRegistry != null && transferModeRegistry.useBinaryForReceive(oid)) {
- binaryTransfer = BINARY;
- }
- bind(index, NULL_OBJECT, oid, binaryTransfer);
- }
-
- /**
- * <p>Escapes a given text value as a literal, wraps it in single quotes, casts it to the
- * to the given data type, and finally wraps the whole thing in parentheses.
- *
- * <p>For example, "123" and "int4" becomes "('123'::int)"
- *
- * <p>The additional parentheses is added to ensure that the surrounding text of where the
- * parameter value is entered does modify the interpretation of the value.
- *
- * <p>For example if our input SQL is: SELECT ?b
- *
- * <p>Using a parameter value of '{}' and type of json we'd get:
- **/
- private static String quoteAndCast(String text, String type, boolean standardConformingStrings) {
- StringBuilder sb = new StringBuilder((text.length() + 10) / 10 * 11); // Add 10% for escaping.
- sb.append("('");
- try {
- Utils.escapeLiteral(sb, text, standardConformingStrings);
- } catch (SQLException e) {
- // This should only happen if we have an embedded null
- // and there's not much we can do if we do hit one.
- //
- // To force a server side failure, we deliberately include
- // a zero byte character in the literal to force the server
- // to reject the command.
- sb.append('\u0000');
- }
- sb.append("'");
- if (type != null) {
- sb.append("::");
- sb.append(type);
- }
- sb.append(")");
- return sb.toString();
- }
-
- @Override
- public String toString(int index, boolean standardConformingStrings) {
- --index;
- Object paramValue = paramValues[index];
- if (paramValue == null) {
- return "?";
- } else if (paramValue == NULL_OBJECT) {
- return "(NULL)";
- }
- String textValue;
- String type;
- if ((flags[index] & BINARY) == BINARY) {
- // handle some of the numeric types
- switch (paramTypes[index]) {
- case Oid.INT2:
- short s = ByteConverter.int2((byte[]) paramValue, 0);
- textValue = Short.toString(s);
- type = "int2";
- break;
-
- case Oid.INT4:
- int i = ByteConverter.int4((byte[]) paramValue, 0);
- textValue = Integer.toString(i);
- type = "int4";
- break;
-
- case Oid.INT8:
- long l = ByteConverter.int8((byte[]) paramValue, 0);
- textValue = Long.toString(l);
- type = "int8";
- break;
-
- case Oid.FLOAT4:
- float f = ByteConverter.float4((byte[]) paramValue, 0);
- if (Float.isNaN(f)) {
- return "('NaN'::real)";
- }
- textValue = Float.toString(f);
- type = "real";
- break;
-
- case Oid.FLOAT8:
- double d = ByteConverter.float8((byte[]) paramValue, 0);
- if (Double.isNaN(d)) {
- return "('NaN'::double precision)";
- }
- textValue = Double.toString(d);
- type = "double precision";
- break;
-
- case Oid.NUMERIC:
- Number n = ByteConverter.numeric((byte[]) paramValue);
- if (n instanceof Double) {
- assert ((Double) n).isNaN();
- return "('NaN'::numeric)";
- }
- textValue = n.toString();
- type = "numeric";
- break;
-
- case Oid.UUID:
- textValue =
- new UUIDArrayAssistant().buildElement((byte[]) paramValue, 0, 16).toString();
- type = "uuid";
- break;
-
- case Oid.POINT:
- PGpoint pgPoint = new PGpoint();
- pgPoint.setByteValue((byte[]) paramValue, 0);
- textValue = pgPoint.toString();
- type = "point";
- break;
-
- case Oid.BOX:
- PGbox pgBox = new PGbox();
- pgBox.setByteValue((byte[]) paramValue, 0);
- textValue = pgBox.toString();
- type = "box";
- break;
-
- default:
- return "?";
- }
- } else {
- textValue = paramValue.toString();
- switch (paramTypes[index]) {
- case Oid.INT2:
- type = "int2";
- break;
- case Oid.INT4:
- type = "int4";
- break;
- case Oid.INT8:
- type = "int8";
- break;
- case Oid.FLOAT4:
- type = "real";
- break;
- case Oid.FLOAT8:
- type = "double precision";
- break;
- case Oid.TIMESTAMP:
- type = "timestamp";
- break;
- case Oid.TIMESTAMPTZ:
- type = "timestamp with time zone";
- break;
- case Oid.TIME:
- type = "time";
- break;
- case Oid.TIMETZ:
- type = "time with time zone";
- break;
- case Oid.DATE:
- type = "date";
- break;
- case Oid.INTERVAL:
- type = "interval";
- break;
- case Oid.NUMERIC:
- type = "numeric";
- break;
- case Oid.UUID:
- type = "uuid";
- break;
- case Oid.BOOL:
- type = "boolean";
- break;
- case Oid.BOX:
- type = "box";
- break;
- case Oid.POINT:
- type = "point";
- break;
- default:
- type = null;
- }
- }
- return quoteAndCast(textValue, type, standardConformingStrings);
- }
-
- @Override
- public void checkAllParametersSet() throws SQLException {
- for (int i = 0; i < paramTypes.length; i++) {
- if (direction(i) != OUT && paramValues[i] == null) {
- throw new PSQLException(GT.tr("No value specified for parameter {0}.", i + 1),
- PSQLState.INVALID_PARAMETER_VALUE);
- }
- }
- }
-
- @Override
- public void convertFunctionOutParameters() {
- for (int i = 0; i < paramTypes.length; i++) {
- if (direction(i) == OUT) {
- paramTypes[i] = Oid.VOID;
- paramValues[i] = NULL_OBJECT;
- }
- }
- }
-
- //
- // bytea helper
- //
-
- private static void streamBytea(PGStream pgStream, StreamWrapper wrapper) throws IOException {
- byte[] rawData = wrapper.getBytes();
- if (rawData != null) {
- pgStream.send(rawData, wrapper.getOffset(), wrapper.getLength());
- return;
+ private static void streamBytea(PGStream pgStream, ByteStreamWriter writer) throws IOException {
+ pgStream.send(writer);
}
- pgStream.sendStream(wrapper.getStream(), wrapper.getLength());
- }
+ @Override
+ public void registerOutParameter(int index, int sqlType) throws SQLException {
+ if (index < 1 || index > paramValues.length) {
+ throw new PSQLException(
+ GT.tr("The column index is out of range: {0}, number of columns: {1}.",
+ index, paramValues.length),
+ PSQLState.INVALID_PARAMETER_VALUE);
+ }
- //
- // byte stream writer support
- //
-
- private static void streamBytea(PGStream pgStream, ByteStreamWriter writer) throws IOException {
- pgStream.send(writer);
- }
-
- @Override
- public int[] getTypeOIDs() {
- return paramTypes;
- }
-
- //
- // Package-private V3 accessors
- //
-
- int getTypeOID(int index) {
- return paramTypes[index - 1];
- }
-
- boolean hasUnresolvedTypes() {
- for (int paramType : paramTypes) {
- if (paramType == Oid.UNSPECIFIED) {
- return true;
- }
- }
- return false;
- }
-
- void setResolvedType(int index, int oid) {
- // only allow overwriting an unknown value or VOID value
- if (paramTypes[index - 1] == Oid.UNSPECIFIED || paramTypes[index - 1] == Oid.VOID) {
- paramTypes[index - 1] = oid;
- } else if (paramTypes[index - 1] != oid) {
- throw new IllegalArgumentException("Can't change resolved type for param: " + index + " from "
- + paramTypes[index - 1] + " to " + oid);
- }
- }
-
- boolean isNull(int index) {
- return paramValues[index - 1] == NULL_OBJECT;
- }
-
- boolean isBinary(int index) {
- return (flags[index - 1] & BINARY) != 0;
- }
-
- private byte direction(int index) {
- return (byte) (flags[index] & INOUT);
- }
-
- int getV3Length(int index) {
- --index;
-
- // Null?
- Object value = paramValues[index];
- if (value == null || value == NULL_OBJECT) {
- throw new IllegalArgumentException("can't getV3Length() on a null parameter");
+ flags[index - 1] |= OUT;
}
- // Directly encoded?
- if (value instanceof byte[]) {
- return ((byte[]) value).length;
+ private void bind(int index, Object value, int oid, byte binary) throws SQLException {
+ if (index < 1 || index > paramValues.length) {
+ throw new PSQLException(
+ GT.tr("The column index is out of range: {0}, number of columns: {1}.",
+ index, paramValues.length),
+ PSQLState.INVALID_PARAMETER_VALUE);
+ }
+
+ --index;
+
+ encoded[index] = null;
+ paramValues[index] = value;
+ flags[index] = (byte) (direction(index) | IN | binary);
+
+ // If we are setting something to an UNSPECIFIED NULL, don't overwrite
+ // our existing type for it. We don't need the correct type info to
+ // send this value, and we don't want to overwrite and require a
+ // reparse.
+ if (oid == Oid.UNSPECIFIED && paramTypes[index] != Oid.UNSPECIFIED && value == NULL_OBJECT) {
+ return;
+ }
+
+ paramTypes[index] = oid;
+ pos = index + 1;
}
- // Binary-format bytea?
- if (value instanceof StreamWrapper) {
- return ((StreamWrapper) value).getLength();
+ @Override
+ public int getParameterCount() {
+ return paramValues.length;
}
- // Binary-format bytea?
- if (value instanceof ByteStreamWriter) {
- return ((ByteStreamWriter) value).getLength();
+ @Override
+ public int getOutParameterCount() {
+ int count = 0;
+ for (int i = 0; i < paramTypes.length; i++) {
+ if ((direction(i) & OUT) == OUT) {
+ count++;
+ }
+ }
+ // Every function has at least one output.
+ if (count == 0) {
+ count = 1;
+ }
+ return count;
+
}
- // Already encoded?
- byte[] encoded = this.encoded[index];
- if (encoded == null) {
- // Encode value and compute actual length using UTF-8.
- this.encoded[index] = encoded = value.toString().getBytes(StandardCharsets.UTF_8);
+ @Override
+ public int getInParameterCount() {
+ int count = 0;
+ for (int i = 0; i < paramTypes.length; i++) {
+ if (direction(i) != OUT) {
+ count++;
+ }
+ }
+ return count;
}
- return encoded.length;
- }
-
- void writeV3Value(int index, PGStream pgStream) throws IOException {
- --index;
-
- // Null?
- Object paramValue = paramValues[index];
- if (paramValue == null || paramValue == NULL_OBJECT) {
- throw new IllegalArgumentException("can't writeV3Value() on a null parameter");
+ @Override
+ public void setIntParameter(int index, int value) throws SQLException {
+ byte[] data = new byte[4];
+ ByteConverter.int4(data, 0, value);
+ bind(index, data, Oid.INT4, BINARY);
}
- // Directly encoded?
- if (paramValue instanceof byte[]) {
- pgStream.send((byte[]) paramValue);
- return;
+ @Override
+ public void setLiteralParameter(int index, String value, int oid) throws SQLException {
+ bind(index, value, oid, TEXT);
}
- // Binary-format bytea?
- if (paramValue instanceof StreamWrapper) {
- try (StreamWrapper streamWrapper = (StreamWrapper) paramValue) {
- streamBytea(pgStream, streamWrapper);
- }
- return;
+ @Override
+ public void setStringParameter(int index, String value, int oid) throws SQLException {
+ bind(index, value, oid, TEXT);
}
- // Streamed bytea?
- if (paramValue instanceof ByteStreamWriter) {
- streamBytea(pgStream, (ByteStreamWriter) paramValue);
- return;
+ @Override
+ public void setBinaryParameter(int index, byte[] value, int oid) throws SQLException {
+ bind(index, value, oid, BINARY);
}
- // Encoded string.
- if (encoded[index] == null) {
- encoded[index] = ((String) paramValue).getBytes(StandardCharsets.UTF_8);
+ //
+ // bytea helper
+ //
+
+ @Override
+ public void setBytea(int index, byte[] data, int offset, int length) throws SQLException {
+ bind(index, new StreamWrapper(data, offset, length), Oid.BYTEA, BINARY);
}
- pgStream.send(encoded[index]);
- }
- @Override
- public ParameterList copy() {
- SimpleParameterList newCopy = new SimpleParameterList(paramValues.length, transferModeRegistry);
- System.arraycopy(paramValues, 0, newCopy.paramValues, 0, paramValues.length);
- System.arraycopy(paramTypes, 0, newCopy.paramTypes, 0, paramTypes.length);
- System.arraycopy(flags, 0, newCopy.flags, 0, flags.length);
- newCopy.pos = pos;
- return newCopy;
- }
+ //
+ // byte stream writer support
+ //
- @Override
- public void clear() {
- Arrays.fill(paramValues, null);
- Arrays.fill(paramTypes, 0);
- Arrays.fill(encoded, null);
- Arrays.fill(flags, (byte) 0);
- pos = 0;
- }
+ @Override
+ public void setBytea(int index, InputStream stream, int length) throws SQLException {
+ bind(index, new StreamWrapper(stream, length), Oid.BYTEA, BINARY);
+ }
- @Override
- public SimpleParameterList [] getSubparams() {
- return null;
- }
+ @Override
+ public void setBytea(int index, InputStream stream) throws SQLException {
+ bind(index, new StreamWrapper(stream), Oid.BYTEA, BINARY);
+ }
- @Override
- public Object[] getValues() {
- return paramValues;
- }
+ //
+ // Package-private V3 accessors
+ //
- @Override
- public int[] getParamTypes() {
- return paramTypes;
- }
+ @Override
+ public void setBytea(int index, ByteStreamWriter writer) throws SQLException {
+ bind(index, writer, Oid.BYTEA, BINARY);
+ }
- @Override
- public byte[] getFlags() {
- return flags;
- }
+ @Override
+ public void setText(int index, InputStream stream) throws SQLException {
+ bind(index, new StreamWrapper(stream), Oid.TEXT, TEXT);
+ }
- @Override
- public byte[] [] getEncoding() {
- return encoded;
- }
+ @Override
+ public void setNull(int index, int oid) throws SQLException {
- @Override
- public void appendAll(ParameterList list) throws SQLException {
- if (list instanceof SimpleParameterList ) {
+ byte binaryTransfer = TEXT;
+
+ if (transferModeRegistry != null && transferModeRegistry.useBinaryForReceive(oid)) {
+ binaryTransfer = BINARY;
+ }
+ bind(index, NULL_OBJECT, oid, binaryTransfer);
+ }
+
+ @Override
+ public String toString(int index, boolean standardConformingStrings) {
+ --index;
+ Object paramValue = paramValues[index];
+ if (paramValue == null) {
+ return "?";
+ } else if (paramValue == NULL_OBJECT) {
+ return "(NULL)";
+ }
+ String textValue;
+ String type;
+ if ((flags[index] & BINARY) == BINARY) {
+ // handle some of the numeric types
+ switch (paramTypes[index]) {
+ case Oid.INT2:
+ short s = ByteConverter.int2((byte[]) paramValue, 0);
+ textValue = Short.toString(s);
+ type = "int2";
+ break;
+
+ case Oid.INT4:
+ int i = ByteConverter.int4((byte[]) paramValue, 0);
+ textValue = Integer.toString(i);
+ type = "int4";
+ break;
+
+ case Oid.INT8:
+ long l = ByteConverter.int8((byte[]) paramValue, 0);
+ textValue = Long.toString(l);
+ type = "int8";
+ break;
+
+ case Oid.FLOAT4:
+ float f = ByteConverter.float4((byte[]) paramValue, 0);
+ if (Float.isNaN(f)) {
+ return "('NaN'::real)";
+ }
+ textValue = Float.toString(f);
+ type = "real";
+ break;
+
+ case Oid.FLOAT8:
+ double d = ByteConverter.float8((byte[]) paramValue, 0);
+ if (Double.isNaN(d)) {
+ return "('NaN'::double precision)";
+ }
+ textValue = Double.toString(d);
+ type = "double precision";
+ break;
+
+ case Oid.NUMERIC:
+ Number n = ByteConverter.numeric((byte[]) paramValue);
+ if (n instanceof Double) {
+ assert ((Double) n).isNaN();
+ return "('NaN'::numeric)";
+ }
+ textValue = n.toString();
+ type = "numeric";
+ break;
+
+ case Oid.UUID:
+ textValue =
+ new UUIDArrayAssistant().buildElement((byte[]) paramValue, 0, 16).toString();
+ type = "uuid";
+ break;
+
+ case Oid.POINT:
+ PGpoint pgPoint = new PGpoint();
+ pgPoint.setByteValue((byte[]) paramValue, 0);
+ textValue = pgPoint.toString();
+ type = "point";
+ break;
+
+ case Oid.BOX:
+ PGbox pgBox = new PGbox();
+ pgBox.setByteValue((byte[]) paramValue, 0);
+ textValue = pgBox.toString();
+ type = "box";
+ break;
+
+ default:
+ return "?";
+ }
+ } else {
+ textValue = paramValue.toString();
+ switch (paramTypes[index]) {
+ case Oid.INT2:
+ type = "int2";
+ break;
+ case Oid.INT4:
+ type = "int4";
+ break;
+ case Oid.INT8:
+ type = "int8";
+ break;
+ case Oid.FLOAT4:
+ type = "real";
+ break;
+ case Oid.FLOAT8:
+ type = "double precision";
+ break;
+ case Oid.TIMESTAMP:
+ type = "timestamp";
+ break;
+ case Oid.TIMESTAMPTZ:
+ type = "timestamp with time zone";
+ break;
+ case Oid.TIME:
+ type = "time";
+ break;
+ case Oid.TIMETZ:
+ type = "time with time zone";
+ break;
+ case Oid.DATE:
+ type = "date";
+ break;
+ case Oid.INTERVAL:
+ type = "interval";
+ break;
+ case Oid.NUMERIC:
+ type = "numeric";
+ break;
+ case Oid.UUID:
+ type = "uuid";
+ break;
+ case Oid.BOOL:
+ type = "boolean";
+ break;
+ case Oid.BOX:
+ type = "box";
+ break;
+ case Oid.POINT:
+ type = "point";
+ break;
+ default:
+ type = null;
+ }
+ }
+ return quoteAndCast(textValue, type, standardConformingStrings);
+ }
+
+ @Override
+ public void checkAllParametersSet() throws SQLException {
+ for (int i = 0; i < paramTypes.length; i++) {
+ if (direction(i) != OUT && paramValues[i] == null) {
+ throw new PSQLException(GT.tr("No value specified for parameter {0}.", i + 1),
+ PSQLState.INVALID_PARAMETER_VALUE);
+ }
+ }
+ }
+
+ @Override
+ public void convertFunctionOutParameters() {
+ for (int i = 0; i < paramTypes.length; i++) {
+ if (direction(i) == OUT) {
+ paramTypes[i] = Oid.VOID;
+ paramValues[i] = NULL_OBJECT;
+ }
+ }
+ }
+
+ @Override
+ public int[] getTypeOIDs() {
+ return paramTypes;
+ }
+
+ int getTypeOID(int index) {
+ return paramTypes[index - 1];
+ }
+
+ boolean hasUnresolvedTypes() {
+ for (int paramType : paramTypes) {
+ if (paramType == Oid.UNSPECIFIED) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ void setResolvedType(int index, int oid) {
+ // only allow overwriting an unknown value or VOID value
+ if (paramTypes[index - 1] == Oid.UNSPECIFIED || paramTypes[index - 1] == Oid.VOID) {
+ paramTypes[index - 1] = oid;
+ } else if (paramTypes[index - 1] != oid) {
+ throw new IllegalArgumentException("Can't change resolved type for param: " + index + " from "
+ + paramTypes[index - 1] + " to " + oid);
+ }
+ }
+
+ boolean isNull(int index) {
+ return paramValues[index - 1] == NULL_OBJECT;
+ }
+
+ boolean isBinary(int index) {
+ return (flags[index - 1] & BINARY) != 0;
+ }
+
+ private byte direction(int index) {
+ return (byte) (flags[index] & INOUT);
+ }
+
+ int getV3Length(int index) {
+ --index;
+
+ // Null?
+ Object value = paramValues[index];
+ if (value == null || value == NULL_OBJECT) {
+ throw new IllegalArgumentException("can't getV3Length() on a null parameter");
+ }
+
+ // Directly encoded?
+ if (value instanceof byte[]) {
+ return ((byte[]) value).length;
+ }
+
+ // Binary-format bytea?
+ if (value instanceof StreamWrapper) {
+ return ((StreamWrapper) value).getLength();
+ }
+
+ // Binary-format bytea?
+ if (value instanceof ByteStreamWriter) {
+ return ((ByteStreamWriter) value).getLength();
+ }
+
+ // Already encoded?
+ byte[] encoded = this.encoded[index];
+ if (encoded == null) {
+ // Encode value and compute actual length using UTF-8.
+ this.encoded[index] = encoded = value.toString().getBytes(StandardCharsets.UTF_8);
+ }
+
+ return encoded.length;
+ }
+
+ void writeV3Value(int index, PGStream pgStream) throws IOException {
+ --index;
+
+ // Null?
+ Object paramValue = paramValues[index];
+ if (paramValue == null || paramValue == NULL_OBJECT) {
+ throw new IllegalArgumentException("can't writeV3Value() on a null parameter");
+ }
+
+ // Directly encoded?
+ if (paramValue instanceof byte[]) {
+ pgStream.send((byte[]) paramValue);
+ return;
+ }
+
+ // Binary-format bytea?
+ if (paramValue instanceof StreamWrapper) {
+ try (StreamWrapper streamWrapper = (StreamWrapper) paramValue) {
+ streamBytea(pgStream, streamWrapper);
+ }
+ return;
+ }
+
+ // Streamed bytea?
+ if (paramValue instanceof ByteStreamWriter) {
+ streamBytea(pgStream, (ByteStreamWriter) paramValue);
+ return;
+ }
+
+ // Encoded string.
+ if (encoded[index] == null) {
+ encoded[index] = ((String) paramValue).getBytes(StandardCharsets.UTF_8);
+ }
+ pgStream.send(encoded[index]);
+ }
+
+ @Override
+ public ParameterList copy() {
+ SimpleParameterList newCopy = new SimpleParameterList(paramValues.length, transferModeRegistry);
+ System.arraycopy(paramValues, 0, newCopy.paramValues, 0, paramValues.length);
+ System.arraycopy(paramTypes, 0, newCopy.paramTypes, 0, paramTypes.length);
+ System.arraycopy(flags, 0, newCopy.flags, 0, flags.length);
+ newCopy.pos = pos;
+ return newCopy;
+ }
+
+ @Override
+ public void clear() {
+ Arrays.fill(paramValues, null);
+ Arrays.fill(paramTypes, 0);
+ Arrays.fill(encoded, null);
+ Arrays.fill(flags, (byte) 0);
+ pos = 0;
+ }
+
+ @Override
+ public SimpleParameterList[] getSubparams() {
+ return null;
+ }
+
+ @Override
+ public Object[] getValues() {
+ return paramValues;
+ }
+
+ @Override
+ public int[] getParamTypes() {
+ return paramTypes;
+ }
+
+ @Override
+ public byte[] getFlags() {
+ return flags;
+ }
+
+ @Override
+ public byte[][] getEncoding() {
+ return encoded;
+ }
+
+ @Override
+ public void appendAll(ParameterList list) throws SQLException {
+ if (list instanceof SimpleParameterList) {
/* only v3.SimpleParameterList is compatible with this type
we need to create copies of our parameters, otherwise the values can be changed */
- SimpleParameterList spl = (SimpleParameterList) list;
- int inParamCount = spl.getInParameterCount();
- if ((pos + inParamCount) > paramValues.length) {
- throw new PSQLException(
- GT.tr("Added parameters index out of range: {0}, number of columns: {1}.",
- (pos + inParamCount), paramValues.length),
- PSQLState.INVALID_PARAMETER_VALUE);
- }
- System.arraycopy(spl.getValues(), 0, this.paramValues, pos, inParamCount);
- System.arraycopy(spl.getParamTypes(), 0, this.paramTypes, pos, inParamCount);
- System.arraycopy(spl.getFlags(), 0, this.flags, pos, inParamCount);
- System.arraycopy(spl.getEncoding(), 0, this.encoded, pos, inParamCount);
- pos += inParamCount;
+ SimpleParameterList spl = (SimpleParameterList) list;
+ int inParamCount = spl.getInParameterCount();
+ if ((pos + inParamCount) > paramValues.length) {
+ throw new PSQLException(
+ GT.tr("Added parameters index out of range: {0}, number of columns: {1}.",
+ (pos + inParamCount), paramValues.length),
+ PSQLState.INVALID_PARAMETER_VALUE);
+ }
+ System.arraycopy(spl.getValues(), 0, this.paramValues, pos, inParamCount);
+ System.arraycopy(spl.getParamTypes(), 0, this.paramTypes, pos, inParamCount);
+ System.arraycopy(spl.getFlags(), 0, this.flags, pos, inParamCount);
+ System.arraycopy(spl.getEncoding(), 0, this.encoded, pos, inParamCount);
+ pos += inParamCount;
+ }
}
- }
- /**
- * Useful implementation of toString.
- * @return String representation of the list values
- */
- @Override
- public String toString() {
- StringBuilder ts = new StringBuilder("<[");
- if (paramValues.length > 0) {
- ts.append(toString(1, true));
- for (int c = 2; c <= paramValues.length; c++) {
- ts.append(" ,").append(toString(c, true));
- }
+ /**
+ * Useful implementation of toString.
+ *
+ * @return String representation of the list values
+ */
+ @Override
+ public String toString() {
+ StringBuilder ts = new StringBuilder("<[");
+ if (paramValues.length > 0) {
+ ts.append(toString(1, true));
+ for (int c = 2; c <= paramValues.length; c++) {
+ ts.append(" ,").append(toString(c, true));
+ }
+ }
+ ts.append("]>");
+ return ts.toString();
}
- ts.append("]>");
- return ts.toString();
- }
-
- private final Object[] paramValues;
- private final int[] paramTypes;
- private final byte[] flags;
- private final byte[] [] encoded;
- private final TypeTransferModeRegistry transferModeRegistry;
-
- /**
- * Marker object representing NULL; this distinguishes "parameter never set" from "parameter set
- * to null".
- */
- private static final Object NULL_OBJECT = new Object();
-
- private int pos;
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/SimpleQuery.java b/pgjdbc/src/main/java/org/postgresql/core/v3/SimpleQuery.java
index d405f4b..b134fd8 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/SimpleQuery.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/SimpleQuery.java
@@ -6,6 +6,12 @@
package org.postgresql.core.v3;
+import java.lang.ref.PhantomReference;
+import java.nio.charset.StandardCharsets;
+import java.util.BitSet;
+import java.util.Map;
+import java.util.logging.Level;
+import java.util.logging.Logger;
import org.postgresql.core.Field;
import org.postgresql.core.NativeQuery;
import org.postgresql.core.Oid;
@@ -14,13 +20,6 @@ import org.postgresql.core.Query;
import org.postgresql.core.SqlCommand;
import org.postgresql.jdbc.PgResultSet;
-import java.lang.ref.PhantomReference;
-import java.nio.charset.StandardCharsets;
-import java.util.BitSet;
-import java.util.Map;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-
/**
* V3 Query implementation for a single-statement query. This also holds the state of any associated
* server-side named statement. We use a PhantomReference managed by the QueryExecutor to handle
@@ -29,353 +28,348 @@ import java.util.logging.Logger;
* @author Oliver Jowett (oliver@opencloud.com)
*/
class SimpleQuery implements Query {
- private static final Logger LOGGER = Logger.getLogger(SimpleQuery.class.getName());
+ static final SimpleParameterList NO_PARAMETERS = new SimpleParameterList(0, null);
+ private static final Logger LOGGER = Logger.getLogger(SimpleQuery.class.getName());
+ private final NativeQuery nativeQuery;
+ private final TypeTransferModeRegistry transferModeRegistry;
+ private final boolean sanitiserDisabled;
+ private Map<String, Integer> resultSetColumnNameIndexMap;
+ private String statementName;
+ private byte[] encodedStatementName;
+ /**
+ * The stored fields from previous execution or describe of a prepared statement. Always null for
+ * non-prepared statements.
+ */
+ private Field[] fields;
- SimpleQuery(SimpleQuery src) {
- this(src.nativeQuery, src.transferModeRegistry, src.sanitiserDisabled);
- }
+ //
+ // Implementation guts
+ //
+ private boolean needUpdateFieldFormats;
+ private boolean hasBinaryFields;
+ private boolean portalDescribed;
+ private boolean statementDescribed;
+ private PhantomReference<?> cleanupRef;
+ private int[] preparedTypes;
+ private BitSet unspecifiedParams;
+ private short deallocateEpoch;
+ private Integer cachedMaxResultRowSize;
- SimpleQuery(NativeQuery query, TypeTransferModeRegistry transferModeRegistry,
- boolean sanitiserDisabled) {
- this.nativeQuery = query;
- this.transferModeRegistry = transferModeRegistry;
- this.sanitiserDisabled = sanitiserDisabled;
- }
-
- @Override
- public ParameterList createParameterList() {
- if (nativeQuery.bindPositions.length == 0) {
- return NO_PARAMETERS;
+ SimpleQuery(SimpleQuery src) {
+ this(src.nativeQuery, src.transferModeRegistry, src.sanitiserDisabled);
}
- return new SimpleParameterList(getBindCount(), transferModeRegistry);
- }
-
- @Override
- public String toString(ParameterList parameters) {
- return nativeQuery.toString(parameters);
- }
-
- @Override
- public String toString() {
- return toString(null);
- }
-
- @Override
- public void close() {
- unprepare();
- }
-
- @Override
- public SimpleQuery [] getSubqueries() {
- return null;
- }
-
- /**
- * <p>Return maximum size in bytes that each result row from this query may return. Mainly used for
- * batches that return results.
- *
- * <p>Results are cached until/unless the query is re-described.
- *
- * @return Max size of result data in bytes according to returned fields, 0 if no results, -1 if
- * result is unbounded.
- * @throws IllegalStateException if the query is not described
- */
- public int getMaxResultRowSize() {
- if (cachedMaxResultRowSize != null) {
- return cachedMaxResultRowSize;
+ SimpleQuery(NativeQuery query, TypeTransferModeRegistry transferModeRegistry,
+ boolean sanitiserDisabled) {
+ this.nativeQuery = query;
+ this.transferModeRegistry = transferModeRegistry;
+ this.sanitiserDisabled = sanitiserDisabled;
}
- if (!this.statementDescribed) {
- throw new IllegalStateException(
- "Cannot estimate result row size on a statement that is not described");
- }
- int maxResultRowSize = 0;
- if (fields != null) {
- for (Field f : fields) {
- final int fieldLength = f.getLength();
- if (fieldLength < 1 || fieldLength >= 65535) {
- /*
- * Field length unknown or large; we can't make any safe estimates about the result size,
- * so we have to fall back to sending queries individually.
- */
- maxResultRowSize = -1;
- break;
+
+ @Override
+ public ParameterList createParameterList() {
+ if (nativeQuery.bindPositions.length == 0) {
+ return NO_PARAMETERS;
}
- maxResultRowSize += fieldLength;
- }
+
+ return new SimpleParameterList(getBindCount(), transferModeRegistry);
}
- cachedMaxResultRowSize = maxResultRowSize;
- return maxResultRowSize;
- }
- //
- // Implementation guts
- //
+ @Override
+ public String toString(ParameterList parameters) {
+ return nativeQuery.toString(parameters);
+ }
- @Override
- public String getNativeSql() {
- return nativeQuery.nativeSql;
- }
+ @Override
+ public String toString() {
+ return toString(null);
+ }
- void setStatementName(String statementName, short deallocateEpoch) {
- assert statementName != null : "statement name should not be null";
- this.statementName = statementName;
- this.encodedStatementName = statementName.getBytes(StandardCharsets.UTF_8);
- this.deallocateEpoch = deallocateEpoch;
- }
+ @Override
+ public void close() {
+ unprepare();
+ }
- void setPrepareTypes(int[] paramTypes) {
- // Remember which parameters were unspecified since the parameters will be overridden later by
- // ParameterDescription message
- for (int i = 0; i < paramTypes.length; i++) {
- int paramType = paramTypes[i];
- if (paramType == Oid.UNSPECIFIED) {
- if (this.unspecifiedParams == null) {
- this.unspecifiedParams = new BitSet();
+ @Override
+ public SimpleQuery[] getSubqueries() {
+ return null;
+ }
+
+ /**
+ * <p>Return maximum size in bytes that each result row from this query may return. Mainly used for
+ * batches that return results.
+ *
+ * <p>Results are cached until/unless the query is re-described.
+ *
+ * @return Max size of result data in bytes according to returned fields, 0 if no results, -1 if
+ * result is unbounded.
+ * @throws IllegalStateException if the query is not described
+ */
+ public int getMaxResultRowSize() {
+ if (cachedMaxResultRowSize != null) {
+ return cachedMaxResultRowSize;
}
- this.unspecifiedParams.set(i);
- }
+ if (!this.statementDescribed) {
+ throw new IllegalStateException(
+ "Cannot estimate result row size on a statement that is not described");
+ }
+ int maxResultRowSize = 0;
+ if (fields != null) {
+ for (Field f : fields) {
+ final int fieldLength = f.getLength();
+ if (fieldLength < 1 || fieldLength >= 65535) {
+ /*
+ * Field length unknown or large; we can't make any safe estimates about the result size,
+ * so we have to fall back to sending queries individually.
+ */
+ maxResultRowSize = -1;
+ break;
+ }
+ maxResultRowSize += fieldLength;
+ }
+ }
+ cachedMaxResultRowSize = maxResultRowSize;
+ return maxResultRowSize;
}
- // paramTypes is changed by "describe statement" response, so we clone the array
- // However, we can reuse array if there is one
- if (this.preparedTypes == null) {
- this.preparedTypes = paramTypes.clone();
- return;
- }
- System.arraycopy(paramTypes, 0, this.preparedTypes, 0, paramTypes.length);
- }
-
- int [] getPrepareTypes() {
- return preparedTypes;
- }
-
- String getStatementName() {
- return statementName;
- }
-
- boolean isPreparedFor(int[] paramTypes, short deallocateEpoch) {
- if (statementName == null || preparedTypes == null) {
- return false; // Not prepared.
- }
- if (this.deallocateEpoch != deallocateEpoch) {
- return false;
+ @Override
+ public String getNativeSql() {
+ return nativeQuery.nativeSql;
}
- assert paramTypes.length == preparedTypes.length
- : String.format("paramTypes:%1$d preparedTypes:%2$d", paramTypes.length,
- preparedTypes.length);
- // Check for compatible types.
- BitSet unspecified = this.unspecifiedParams;
- for (int i = 0; i < paramTypes.length; i++) {
- int paramType = paramTypes[i];
- // Either paramType should match prepared type
- // Or paramType==UNSPECIFIED and the prepare type was UNSPECIFIED
+ void setStatementName(String statementName, short deallocateEpoch) {
+ assert statementName != null : "statement name should not be null";
+ this.statementName = statementName;
+ this.encodedStatementName = statementName.getBytes(StandardCharsets.UTF_8);
+ this.deallocateEpoch = deallocateEpoch;
+ }
- // Note: preparedTypes can be updated by "statement describe"
- // 1) parse(name="S_01", sql="select ?::timestamp", types={UNSPECIFIED})
- // 2) statement describe: bind 1 type is TIMESTAMP
- // 3) SimpleQuery.preparedTypes is updated to TIMESTAMP
- // ...
- // 4.1) bind(name="S_01", ..., types={TIMESTAMP}) -> OK (since preparedTypes is equal to TIMESTAMP)
- // 4.2) bind(name="S_01", ..., types={UNSPECIFIED}) -> OK (since the query was initially parsed with UNSPECIFIED)
- // 4.3) bind(name="S_01", ..., types={DATE}) -> KO, unprepare and parse required
+ int[] getPrepareTypes() {
+ return preparedTypes;
+ }
- int preparedType = preparedTypes[i];
- if (paramType != preparedType
- && (paramType != Oid.UNSPECIFIED
- || unspecified == null
- || !unspecified.get(i))) {
- if (LOGGER.isLoggable(Level.FINER)) {
- LOGGER.log(Level.FINER,
- "Statement {0} does not match new parameter types. Will have to un-prepare it and parse once again."
- + " To avoid performance issues, use the same data type for the same bind position. Bind index (1-based) is {1},"
- + " preparedType was {2} (after describe {3}), current bind type is {4}",
- new Object[]{statementName, i + 1,
- Oid.toString(unspecified != null && unspecified.get(i) ? 0 : preparedType),
- Oid.toString(preparedType), Oid.toString(paramType)});
+ void setPrepareTypes(int[] paramTypes) {
+ // Remember which parameters were unspecified since the parameters will be overridden later by
+ // ParameterDescription message
+ for (int i = 0; i < paramTypes.length; i++) {
+ int paramType = paramTypes[i];
+ if (paramType == Oid.UNSPECIFIED) {
+ if (this.unspecifiedParams == null) {
+ this.unspecifiedParams = new BitSet();
+ }
+ this.unspecifiedParams.set(i);
+ }
+ }
+
+ // paramTypes is changed by "describe statement" response, so we clone the array
+ // However, we can reuse array if there is one
+ if (this.preparedTypes == null) {
+ this.preparedTypes = paramTypes.clone();
+ return;
+ }
+ System.arraycopy(paramTypes, 0, this.preparedTypes, 0, paramTypes.length);
+ }
+
+ String getStatementName() {
+ return statementName;
+ }
+
+ boolean isPreparedFor(int[] paramTypes, short deallocateEpoch) {
+ if (statementName == null || preparedTypes == null) {
+ return false; // Not prepared.
+ }
+ if (this.deallocateEpoch != deallocateEpoch) {
+ return false;
+ }
+
+ assert paramTypes.length == preparedTypes.length
+ : String.format("paramTypes:%1$d preparedTypes:%2$d", paramTypes.length,
+ preparedTypes.length);
+ // Check for compatible types.
+ BitSet unspecified = this.unspecifiedParams;
+ for (int i = 0; i < paramTypes.length; i++) {
+ int paramType = paramTypes[i];
+ // Either paramType should match prepared type
+ // Or paramType==UNSPECIFIED and the prepare type was UNSPECIFIED
+
+ // Note: preparedTypes can be updated by "statement describe"
+ // 1) parse(name="S_01", sql="select ?::timestamp", types={UNSPECIFIED})
+ // 2) statement describe: bind 1 type is TIMESTAMP
+ // 3) SimpleQuery.preparedTypes is updated to TIMESTAMP
+ // ...
+ // 4.1) bind(name="S_01", ..., types={TIMESTAMP}) -> OK (since preparedTypes is equal to TIMESTAMP)
+ // 4.2) bind(name="S_01", ..., types={UNSPECIFIED}) -> OK (since the query was initially parsed with UNSPECIFIED)
+ // 4.3) bind(name="S_01", ..., types={DATE}) -> KO, unprepare and parse required
+
+ int preparedType = preparedTypes[i];
+ if (paramType != preparedType
+ && (paramType != Oid.UNSPECIFIED
+ || unspecified == null
+ || !unspecified.get(i))) {
+ if (LOGGER.isLoggable(Level.FINER)) {
+ LOGGER.log(Level.FINER,
+ "Statement {0} does not match new parameter types. Will have to un-prepare it and parse once again."
+ + " To avoid performance issues, use the same data type for the same bind position. Bind index (1-based) is {1},"
+ + " preparedType was {2} (after describe {3}), current bind type is {4}",
+ new Object[]{statementName, i + 1,
+ Oid.toString(unspecified != null && unspecified.get(i) ? 0 : preparedType),
+ Oid.toString(preparedType), Oid.toString(paramType)});
+ }
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ boolean hasUnresolvedTypes() {
+ if (preparedTypes == null) {
+ return true;
+ }
+
+ return this.unspecifiedParams != null && !this.unspecifiedParams.isEmpty();
+ }
+
+ byte[] getEncodedStatementName() {
+ return encodedStatementName;
+ }
+
+ /**
+ * Returns the fields that this query will return. If the result set fields are not known returns
+ * null.
+ *
+ * @return the fields that this query will return.
+ */
+ Field[] getFields() {
+ return fields;
+ }
+
+ /**
+ * Sets the fields that this query will return.
+ *
+ * @param fields The fields that this query will return.
+ */
+ void setFields(Field[] fields) {
+ this.fields = fields;
+ this.resultSetColumnNameIndexMap = null;
+ this.cachedMaxResultRowSize = null;
+ this.needUpdateFieldFormats = fields != null;
+ this.hasBinaryFields = false; // just in case
+ }
+
+ /**
+ * Returns true if current query needs field formats be adjusted as per connection configuration.
+ * Subsequent invocations would return {@code false}. The idea is to perform adjustments only
+ * once, not for each
+ * {@link QueryExecutorImpl#sendBind(SimpleQuery, SimpleParameterList, Portal, boolean)}
+ *
+ * @return true if current query needs field formats be adjusted as per connection configuration
+ */
+ boolean needUpdateFieldFormats() {
+ if (needUpdateFieldFormats) {
+ needUpdateFieldFormats = false;
+ return true;
}
return false;
- }
}
- return true;
- }
-
- boolean hasUnresolvedTypes() {
- if (preparedTypes == null) {
- return true;
+ public void resetNeedUpdateFieldFormats() {
+ needUpdateFieldFormats = fields != null;
}
- return this.unspecifiedParams != null && !this.unspecifiedParams.isEmpty();
- }
-
- byte [] getEncodedStatementName() {
- return encodedStatementName;
- }
-
- /**
- * Sets the fields that this query will return.
- *
- * @param fields The fields that this query will return.
- */
- void setFields(Field [] fields) {
- this.fields = fields;
- this.resultSetColumnNameIndexMap = null;
- this.cachedMaxResultRowSize = null;
- this.needUpdateFieldFormats = fields != null;
- this.hasBinaryFields = false; // just in case
- }
-
- /**
- * Returns the fields that this query will return. If the result set fields are not known returns
- * null.
- *
- * @return the fields that this query will return.
- */
- Field [] getFields() {
- return fields;
- }
-
- /**
- * Returns true if current query needs field formats be adjusted as per connection configuration.
- * Subsequent invocations would return {@code false}. The idea is to perform adjustments only
- * once, not for each
- * {@link QueryExecutorImpl#sendBind(SimpleQuery, SimpleParameterList, Portal, boolean)}
- *
- * @return true if current query needs field formats be adjusted as per connection configuration
- */
- boolean needUpdateFieldFormats() {
- if (needUpdateFieldFormats) {
- needUpdateFieldFormats = false;
- return true;
- }
- return false;
- }
-
- public void resetNeedUpdateFieldFormats() {
- needUpdateFieldFormats = fields != null;
- }
-
- public boolean hasBinaryFields() {
- return hasBinaryFields;
- }
-
- public void setHasBinaryFields(boolean hasBinaryFields) {
- this.hasBinaryFields = hasBinaryFields;
- }
-
- // Have we sent a Describe Portal message for this query yet?
- boolean isPortalDescribed() {
- return portalDescribed;
- }
-
- void setPortalDescribed(boolean portalDescribed) {
- this.portalDescribed = portalDescribed;
- this.cachedMaxResultRowSize = null;
- }
-
- // Have we sent a Describe Statement message for this query yet?
- // Note that we might not have need to, so this may always be false.
- @Override
- public boolean isStatementDescribed() {
- return statementDescribed;
- }
-
- void setStatementDescribed(boolean statementDescribed) {
- this.statementDescribed = statementDescribed;
- this.cachedMaxResultRowSize = null;
- }
-
- @Override
- public boolean isEmpty() {
- return getNativeSql().isEmpty();
- }
-
- void setCleanupRef(PhantomReference<?> cleanupRef) {
- PhantomReference<?> oldCleanupRef = this.cleanupRef;
- if (oldCleanupRef != null) {
- oldCleanupRef.clear();
- oldCleanupRef.enqueue();
- }
- this.cleanupRef = cleanupRef;
- }
-
- void unprepare() {
- PhantomReference<?> cleanupRef = this.cleanupRef;
- if (cleanupRef != null) {
- cleanupRef.clear();
- cleanupRef.enqueue();
- this.cleanupRef = null;
- }
- if (this.unspecifiedParams != null) {
- this.unspecifiedParams.clear();
+ public boolean hasBinaryFields() {
+ return hasBinaryFields;
}
- statementName = null;
- encodedStatementName = null;
- fields = null;
- this.resultSetColumnNameIndexMap = null;
- portalDescribed = false;
- statementDescribed = false;
- cachedMaxResultRowSize = null;
- }
-
- @Override
- public int getBatchSize() {
- return 1;
- }
-
- NativeQuery getNativeQuery() {
- return nativeQuery;
- }
-
- public final int getBindCount() {
- return nativeQuery.bindPositions.length * getBatchSize();
- }
-
- private Map<String, Integer> resultSetColumnNameIndexMap;
-
- @Override
- public Map<String, Integer> getResultSetColumnNameIndexMap() {
- Map<String, Integer> columnPositions = this.resultSetColumnNameIndexMap;
- if (columnPositions == null && fields != null) {
- columnPositions =
- PgResultSet.createColumnNameIndexMap(fields, sanitiserDisabled);
- if (statementName != null) {
- // Cache column positions for server-prepared statements only
- this.resultSetColumnNameIndexMap = columnPositions;
- }
+ public void setHasBinaryFields(boolean hasBinaryFields) {
+ this.hasBinaryFields = hasBinaryFields;
}
- return columnPositions;
- }
- @Override
- public SqlCommand getSqlCommand() {
- return nativeQuery.getCommand();
- }
+ // Have we sent a Describe Portal message for this query yet?
+ boolean isPortalDescribed() {
+ return portalDescribed;
+ }
- private final NativeQuery nativeQuery;
+ void setPortalDescribed(boolean portalDescribed) {
+ this.portalDescribed = portalDescribed;
+ this.cachedMaxResultRowSize = null;
+ }
- private final TypeTransferModeRegistry transferModeRegistry;
- private String statementName;
- private byte [] encodedStatementName;
- /**
- * The stored fields from previous execution or describe of a prepared statement. Always null for
- * non-prepared statements.
- */
- private Field [] fields;
- private boolean needUpdateFieldFormats;
- private boolean hasBinaryFields;
- private boolean portalDescribed;
- private boolean statementDescribed;
- private final boolean sanitiserDisabled;
- private PhantomReference<?> cleanupRef;
- private int [] preparedTypes;
- private BitSet unspecifiedParams;
- private short deallocateEpoch;
+ // Have we sent a Describe Statement message for this query yet?
+ // Note that we might not have need to, so this may always be false.
+ @Override
+ public boolean isStatementDescribed() {
+ return statementDescribed;
+ }
- private Integer cachedMaxResultRowSize;
+ void setStatementDescribed(boolean statementDescribed) {
+ this.statementDescribed = statementDescribed;
+ this.cachedMaxResultRowSize = null;
+ }
- static final SimpleParameterList NO_PARAMETERS = new SimpleParameterList(0, null);
+ @Override
+ public boolean isEmpty() {
+ return getNativeSql().isEmpty();
+ }
+
+ void setCleanupRef(PhantomReference<?> cleanupRef) {
+ PhantomReference<?> oldCleanupRef = this.cleanupRef;
+ if (oldCleanupRef != null) {
+ oldCleanupRef.clear();
+ oldCleanupRef.enqueue();
+ }
+ this.cleanupRef = cleanupRef;
+ }
+
+ void unprepare() {
+ PhantomReference<?> cleanupRef = this.cleanupRef;
+ if (cleanupRef != null) {
+ cleanupRef.clear();
+ cleanupRef.enqueue();
+ this.cleanupRef = null;
+ }
+ if (this.unspecifiedParams != null) {
+ this.unspecifiedParams.clear();
+ }
+
+ statementName = null;
+ encodedStatementName = null;
+ fields = null;
+ this.resultSetColumnNameIndexMap = null;
+ portalDescribed = false;
+ statementDescribed = false;
+ cachedMaxResultRowSize = null;
+ }
+
+ @Override
+ public int getBatchSize() {
+ return 1;
+ }
+
+ NativeQuery getNativeQuery() {
+ return nativeQuery;
+ }
+
+ public final int getBindCount() {
+ return nativeQuery.bindPositions.length * getBatchSize();
+ }
+
+ @Override
+ public Map<String, Integer> getResultSetColumnNameIndexMap() {
+ Map<String, Integer> columnPositions = this.resultSetColumnNameIndexMap;
+ if (columnPositions == null && fields != null) {
+ columnPositions =
+ PgResultSet.createColumnNameIndexMap(fields, sanitiserDisabled);
+ if (statementName != null) {
+ // Cache column positions for server-prepared statements only
+ this.resultSetColumnNameIndexMap = columnPositions;
+ }
+ }
+ return columnPositions;
+ }
+
+ @Override
+ public SqlCommand getSqlCommand() {
+ return nativeQuery.getCommand();
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/TypeTransferModeRegistry.java b/pgjdbc/src/main/java/org/postgresql/core/v3/TypeTransferModeRegistry.java
index c50570c..02bdb0d 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/TypeTransferModeRegistry.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/TypeTransferModeRegistry.java
@@ -6,17 +6,19 @@
package org.postgresql.core.v3;
public interface TypeTransferModeRegistry {
- /**
- * Returns if given oid should be sent in binary format.
- * @param oid type oid
- * @return true if given oid should be sent in binary format
- */
- boolean useBinaryForSend(int oid);
+ /**
+ * Returns if given oid should be sent in binary format.
+ *
+ * @param oid type oid
+ * @return true if given oid should be sent in binary format
+ */
+ boolean useBinaryForSend(int oid);
- /**
- * Returns if given oid should be received in binary format.
- * @param oid type oid
- * @return true if given oid should be received in binary format
- */
- boolean useBinaryForReceive(int oid);
+ /**
+ * Returns if given oid should be received in binary format.
+ *
+ * @param oid type oid
+ * @return true if given oid should be received in binary format
+ */
+ boolean useBinaryForReceive(int oid);
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/V3ParameterList.java b/pgjdbc/src/main/java/org/postgresql/core/v3/V3ParameterList.java
index c49e0e0..92a6191 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/V3ParameterList.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/V3ParameterList.java
@@ -6,9 +6,8 @@
package org.postgresql.core.v3;
-import org.postgresql.core.ParameterList;
-
import java.sql.SQLException;
+import org.postgresql.core.ParameterList;
/**
* Common interface for all V3 parameter list implementations.
@@ -16,45 +15,48 @@ import java.sql.SQLException;
* @author Oliver Jowett (oliver@opencloud.com)
*/
interface V3ParameterList extends ParameterList {
- /**
- * Ensure that all parameters in this list have been assigned values. Return silently if all is
- * well, otherwise throw an appropriate exception.
- *
- * @throws SQLException if not all parameters are set.
- */
- void checkAllParametersSet() throws SQLException;
+ /**
+ * Ensure that all parameters in this list have been assigned values. Return silently if all is
+ * well, otherwise throw an appropriate exception.
+ *
+ * @throws SQLException if not all parameters are set.
+ */
+ void checkAllParametersSet() throws SQLException;
- /**
- * Convert any function output parameters to the correct type (void) and set an ignorable value
- * for it.
- */
- void convertFunctionOutParameters();
+ /**
+ * Convert any function output parameters to the correct type (void) and set an ignorable value
+ * for it.
+ */
+ void convertFunctionOutParameters();
- /**
- * Return a list of the SimpleParameterList objects that make up this parameter list. If this
- * object is already a SimpleParameterList, returns null (avoids an extra array construction in
- * the common case).
- *
- * @return an array of single-statement parameter lists, or null if this object is
- * already a single-statement parameter list.
- */
- SimpleParameterList [] getSubparams();
+ /**
+ * Return a list of the SimpleParameterList objects that make up this parameter list. If this
+ * object is already a SimpleParameterList, returns null (avoids an extra array construction in
+ * the common case).
+ *
+ * @return an array of single-statement parameter lists, or null if this object is
+ * already a single-statement parameter list.
+ */
+ SimpleParameterList[] getSubparams();
- /**
- * Return the parameter type information.
- * @return an array of {@link org.postgresql.core.Oid} type information
- */
- int [] getParamTypes();
+ /**
+ * Return the parameter type information.
+ *
+ * @return an array of {@link org.postgresql.core.Oid} type information
+ */
+ int[] getParamTypes();
- /**
- * Return the flags for each parameter.
- * @return an array of bytes used to store flags.
- */
- byte [] getFlags();
+ /**
+ * Return the flags for each parameter.
+ *
+ * @return an array of bytes used to store flags.
+ */
+ byte[] getFlags();
- /**
- * Return the encoding for each parameter.
- * @return nested byte array of bytes with encoding information.
- */
- byte [] [] getEncoding();
+ /**
+ * Return the encoding for each parameter.
+ *
+ * @return nested byte array of bytes with encoding information.
+ */
+ byte[][] getEncoding();
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/adaptivefetch/AdaptiveFetchCache.java b/pgjdbc/src/main/java/org/postgresql/core/v3/adaptivefetch/AdaptiveFetchCache.java
index 83e1c92..e2d68fa 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/adaptivefetch/AdaptiveFetchCache.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/adaptivefetch/AdaptiveFetchCache.java
@@ -5,13 +5,12 @@
package org.postgresql.core.v3.adaptivefetch;
-import org.postgresql.PGProperty;
-import org.postgresql.core.Query;
-
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
+import org.postgresql.PGProperty;
+import org.postgresql.core.Query;
/**
* The main purpose of this class is to handle adaptive fetching process. Adaptive fetching is used
@@ -25,173 +24,173 @@ import java.util.Properties;
*/
public class AdaptiveFetchCache {
- private final Map adaptiveFetchInfoMap;
- private boolean adaptiveFetch;
- private final int minimumAdaptiveFetchSize;
- private int maximumAdaptiveFetchSize = -1;
- private long maximumResultBufferSize = -1;
+ private final Map adaptiveFetchInfoMap;
+ private final int minimumAdaptiveFetchSize;
+ private boolean adaptiveFetch;
+ private int maximumAdaptiveFetchSize = -1;
+ private long maximumResultBufferSize = -1;
- public AdaptiveFetchCache(long maximumResultBufferSize, Properties info)
- throws SQLException {
- this.adaptiveFetchInfoMap = new HashMap<>();
+ public AdaptiveFetchCache(long maximumResultBufferSize, Properties info)
+ throws SQLException {
+ this.adaptiveFetchInfoMap = new HashMap<>();
- this.adaptiveFetch = PGProperty.ADAPTIVE_FETCH.getBoolean(info);
- this.minimumAdaptiveFetchSize = PGProperty.ADAPTIVE_FETCH_MINIMUM.getInt(info);
- this.maximumAdaptiveFetchSize = PGProperty.ADAPTIVE_FETCH_MAXIMUM.getInt(info);
+ this.adaptiveFetch = PGProperty.ADAPTIVE_FETCH.getBoolean(info);
+ this.minimumAdaptiveFetchSize = PGProperty.ADAPTIVE_FETCH_MINIMUM.getInt(info);
+ this.maximumAdaptiveFetchSize = PGProperty.ADAPTIVE_FETCH_MAXIMUM.getInt(info);
- this.maximumResultBufferSize = maximumResultBufferSize;
- }
-
- /**
- * Add query to being cached and computing adaptive fetch size.
- *
- * @param adaptiveFetch state of adaptive fetch, which should be used during adding query
- * @param query query to be cached
- */
- public void addNewQuery(boolean adaptiveFetch, Query query) {
- if (adaptiveFetch && maximumResultBufferSize != -1) {
- String sql = query.getNativeSql().trim();
- AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = adaptiveFetchInfoMap.get(sql);
- if (adaptiveFetchCacheEntry == null) {
- adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
- }
- adaptiveFetchCacheEntry.incrementCounter();
-
- adaptiveFetchInfoMap.put(sql, adaptiveFetchCacheEntry);
+ this.maximumResultBufferSize = maximumResultBufferSize;
}
- }
- /**
- * Update adaptive fetch size for given query.
- *
- * @param adaptiveFetch state of adaptive fetch, which should be used during updating fetch
- * size for query
- * @param query query to be updated
- * @param maximumRowSizeBytes max row size used during updating information about adaptive fetch
- * size for given query
- */
- public void updateQueryFetchSize(boolean adaptiveFetch, Query query, int maximumRowSizeBytes) {
- if (adaptiveFetch && maximumResultBufferSize != -1) {
- String sql = query.getNativeSql().trim();
- AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = adaptiveFetchInfoMap.get(sql);
- if (adaptiveFetchCacheEntry != null) {
- int adaptiveMaximumRowSize = adaptiveFetchCacheEntry.getMaximumRowSizeBytes();
- if (adaptiveMaximumRowSize < maximumRowSizeBytes && maximumRowSizeBytes > 0) {
- int newFetchSize = (int) (maximumResultBufferSize / maximumRowSizeBytes);
- newFetchSize = adjustFetchSize(newFetchSize);
+ /**
+     * Add a query to the cache and compute its adaptive fetch size.
+ *
+ * @param adaptiveFetch state of adaptive fetch, which should be used during adding query
+ * @param query query to be cached
+ */
+ public void addNewQuery(boolean adaptiveFetch, Query query) {
+ if (adaptiveFetch && maximumResultBufferSize != -1) {
+ String sql = query.getNativeSql().trim();
+ AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = adaptiveFetchInfoMap.get(sql);
+ if (adaptiveFetchCacheEntry == null) {
+ adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
+ }
+ adaptiveFetchCacheEntry.incrementCounter();
- adaptiveFetchCacheEntry.setMaximumRowSizeBytes(maximumRowSizeBytes);
- adaptiveFetchCacheEntry.setSize(newFetchSize);
-
- adaptiveFetchInfoMap.put(sql, adaptiveFetchCacheEntry);
+ adaptiveFetchInfoMap.put(sql, adaptiveFetchCacheEntry);
}
- }
}
- }
- /**
- * Get adaptive fetch size for given query.
- *
- * @param adaptiveFetch state of adaptive fetch, which should be used during getting fetch size
- * for query
- * @param query query to which we want get adaptive fetch size
- * @return adaptive fetch size for query or -1 if size doesn't exist/adaptive fetch state is false
- */
- public int getFetchSizeForQuery(boolean adaptiveFetch, Query query) {
- if (adaptiveFetch && maximumResultBufferSize != -1) {
- String sql = query.getNativeSql().trim();
- AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = adaptiveFetchInfoMap.get(sql);
- if (adaptiveFetchCacheEntry != null) {
- return adaptiveFetchCacheEntry.getSize();
- }
+ /**
+ * Update adaptive fetch size for given query.
+ *
+ * @param adaptiveFetch state of adaptive fetch, which should be used during updating fetch
+ * size for query
+ * @param query query to be updated
+ * @param maximumRowSizeBytes max row size used during updating information about adaptive fetch
+ * size for given query
+ */
+ public void updateQueryFetchSize(boolean adaptiveFetch, Query query, int maximumRowSizeBytes) {
+ if (adaptiveFetch && maximumResultBufferSize != -1) {
+ String sql = query.getNativeSql().trim();
+ AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = adaptiveFetchInfoMap.get(sql);
+ if (adaptiveFetchCacheEntry != null) {
+ int adaptiveMaximumRowSize = adaptiveFetchCacheEntry.getMaximumRowSizeBytes();
+ if (adaptiveMaximumRowSize < maximumRowSizeBytes && maximumRowSizeBytes > 0) {
+ int newFetchSize = (int) (maximumResultBufferSize / maximumRowSizeBytes);
+ newFetchSize = adjustFetchSize(newFetchSize);
+
+ adaptiveFetchCacheEntry.setMaximumRowSizeBytes(maximumRowSizeBytes);
+ adaptiveFetchCacheEntry.setSize(newFetchSize);
+
+ adaptiveFetchInfoMap.put(sql, adaptiveFetchCacheEntry);
+ }
+ }
+ }
}
- return -1;
- }
- /**
- * Remove query information from caching.
- *
- * @param adaptiveFetch state of adaptive fetch, which should be used during removing fetch size
- * for query
- * @param query query to be removed from caching
- */
- public void removeQuery(boolean adaptiveFetch, Query query) {
- if (adaptiveFetch && maximumResultBufferSize != -1) {
- String sql = query.getNativeSql().trim();
- AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = adaptiveFetchInfoMap.get(sql);
- if (adaptiveFetchCacheEntry != null) {
- adaptiveFetchCacheEntry.decrementCounter();
+ /**
+ * Get adaptive fetch size for given query.
+ *
+ * @param adaptiveFetch state of adaptive fetch, which should be used during getting fetch size
+ * for query
+ * @param query query to which we want get adaptive fetch size
+     * @return adaptive fetch size for the query, or -1 if no cached size exists or adaptive fetch is disabled
+ */
+ public int getFetchSizeForQuery(boolean adaptiveFetch, Query query) {
+ if (adaptiveFetch && maximumResultBufferSize != -1) {
+ String sql = query.getNativeSql().trim();
+ AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = adaptiveFetchInfoMap.get(sql);
+ if (adaptiveFetchCacheEntry != null) {
+ return adaptiveFetchCacheEntry.getSize();
+ }
+ }
+ return -1;
+ }
- if (adaptiveFetchCacheEntry.getCounter() < 1) {
- adaptiveFetchInfoMap.remove(sql);
+ /**
+ * Remove query information from caching.
+ *
+ * @param adaptiveFetch state of adaptive fetch, which should be used during removing fetch size
+ * for query
+ * @param query query to be removed from caching
+ */
+ public void removeQuery(boolean adaptiveFetch, Query query) {
+ if (adaptiveFetch && maximumResultBufferSize != -1) {
+ String sql = query.getNativeSql().trim();
+ AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = adaptiveFetchInfoMap.get(sql);
+ if (adaptiveFetchCacheEntry != null) {
+ adaptiveFetchCacheEntry.decrementCounter();
+
+ if (adaptiveFetchCacheEntry.getCounter() < 1) {
+ adaptiveFetchInfoMap.remove(sql);
+ } else {
+ adaptiveFetchInfoMap.put(sql, adaptiveFetchCacheEntry);
+ }
+ }
+ }
+ }
+
+ /**
+ * Set maximum and minimum constraints on given value.
+ *
+ * @param actualSize value which should be the computed fetch size
+     * @return value which meets the constraints
+ */
+ private int adjustFetchSize(int actualSize) {
+ int size = adjustMaximumFetchSize(actualSize);
+ size = adjustMinimumFetchSize(size);
+ return size;
+ }
+
+ /**
+ * Set minimum constraint on given value.
+ *
+ * @param actualSize value which should be the computed fetch size
+     * @return value which meets the minimum constraint
+ */
+ private int adjustMinimumFetchSize(int actualSize) {
+ if (minimumAdaptiveFetchSize == 0) {
+ return actualSize;
+ }
+ if (minimumAdaptiveFetchSize > actualSize) {
+ return minimumAdaptiveFetchSize;
} else {
- adaptiveFetchInfoMap.put(sql, adaptiveFetchCacheEntry);
+ return actualSize;
}
- }
}
- }
- /**
- * Set maximum and minimum constraints on given value.
- *
- * @param actualSize value which should be the computed fetch size
- * @return value which meet the constraints
- */
- private int adjustFetchSize(int actualSize) {
- int size = adjustMaximumFetchSize(actualSize);
- size = adjustMinimumFetchSize(size);
- return size;
- }
-
- /**
- * Set minimum constraint on given value.
- *
- * @param actualSize value which should be the computed fetch size
- * @return value which meet the minimum constraint
- */
- private int adjustMinimumFetchSize(int actualSize) {
- if (minimumAdaptiveFetchSize == 0) {
- return actualSize;
+ /**
+ * Set maximum constraint on given value.
+ *
+ * @param actualSize value which should be the computed fetch size
+     * @return value which meets the maximum constraint
+ */
+ private int adjustMaximumFetchSize(int actualSize) {
+ if (maximumAdaptiveFetchSize == -1) {
+ return actualSize;
+ }
+ if (maximumAdaptiveFetchSize < actualSize) {
+ return maximumAdaptiveFetchSize;
+ } else {
+ return actualSize;
+ }
}
- if (minimumAdaptiveFetchSize > actualSize) {
- return minimumAdaptiveFetchSize;
- } else {
- return actualSize;
- }
- }
- /**
- * Set maximum constraint on given value.
- *
- * @param actualSize value which should be the computed fetch size
- * @return value which meet the maximum constraint
- */
- private int adjustMaximumFetchSize(int actualSize) {
- if (maximumAdaptiveFetchSize == -1) {
- return actualSize;
+ /**
+ * Get state of adaptive fetch.
+ *
+ * @return state of adaptive fetch
+ */
+ public boolean getAdaptiveFetch() {
+ return adaptiveFetch;
}
- if (maximumAdaptiveFetchSize < actualSize) {
- return maximumAdaptiveFetchSize;
- } else {
- return actualSize;
+
+ /**
+ * Set state of adaptive fetch.
+ *
+ * @param adaptiveFetch desired state of adaptive fetch
+ */
+ public void setAdaptiveFetch(boolean adaptiveFetch) {
+ this.adaptiveFetch = adaptiveFetch;
}
- }
-
- /**
- * Get state of adaptive fetch.
- *
- * @return state of adaptive fetch
- */
- public boolean getAdaptiveFetch() {
- return adaptiveFetch;
- }
-
- /**
- * Set state of adaptive fetch.
- *
- * @param adaptiveFetch desired state of adaptive fetch
- */
- public void setAdaptiveFetch(boolean adaptiveFetch) {
- this.adaptiveFetch = adaptiveFetch;
- }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/adaptivefetch/AdaptiveFetchCacheEntry.java b/pgjdbc/src/main/java/org/postgresql/core/v3/adaptivefetch/AdaptiveFetchCacheEntry.java
index 97a12ff..5b4511c 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/adaptivefetch/AdaptiveFetchCacheEntry.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/adaptivefetch/AdaptiveFetchCacheEntry.java
@@ -7,39 +7,39 @@ package org.postgresql.core.v3.adaptivefetch;
public class AdaptiveFetchCacheEntry {
- private int size = -1; // Holds information about adaptive fetch size for query
- private int counter; // Number of queries in execution using that query info
- private int maximumRowSizeBytes = -1; // Maximum row size in bytes saved for query so far
+ private int size = -1; // Holds information about adaptive fetch size for query
+ private int counter; // Number of queries in execution using that query info
+ private int maximumRowSizeBytes = -1; // Maximum row size in bytes saved for query so far
- public int getSize() {
- return size;
- }
+ public int getSize() {
+ return size;
+ }
- public void setSize(int size) {
- this.size = size;
- }
+ public void setSize(int size) {
+ this.size = size;
+ }
- public int getCounter() {
- return counter;
- }
+ public int getCounter() {
+ return counter;
+ }
- public void setCounter(int counter) {
- this.counter = counter;
- }
+ public void setCounter(int counter) {
+ this.counter = counter;
+ }
- public int getMaximumRowSizeBytes() {
- return maximumRowSizeBytes;
- }
+ public int getMaximumRowSizeBytes() {
+ return maximumRowSizeBytes;
+ }
- public void setMaximumRowSizeBytes(int maximumRowSizeBytes) {
- this.maximumRowSizeBytes = maximumRowSizeBytes;
- }
+ public void setMaximumRowSizeBytes(int maximumRowSizeBytes) {
+ this.maximumRowSizeBytes = maximumRowSizeBytes;
+ }
- public void incrementCounter() {
- counter++;
- }
+ public void incrementCounter() {
+ counter++;
+ }
- public void decrementCounter() {
- counter--;
- }
+ public void decrementCounter() {
+ counter--;
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/replication/V3PGReplicationStream.java b/pgjdbc/src/main/java/org/postgresql/core/v3/replication/V3PGReplicationStream.java
index f1fb9fa..9d31f57 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/replication/V3PGReplicationStream.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/replication/V3PGReplicationStream.java
@@ -5,14 +5,6 @@
package org.postgresql.core.v3.replication;
-import org.postgresql.copy.CopyDual;
-import org.postgresql.replication.LogSequenceNumber;
-import org.postgresql.replication.PGReplicationStream;
-import org.postgresql.replication.ReplicationType;
-import org.postgresql.util.GT;
-import org.postgresql.util.PSQLException;
-import org.postgresql.util.PSQLState;
-
import java.net.SocketTimeoutException;
import java.nio.ByteBuffer;
import java.sql.SQLException;
@@ -20,279 +12,286 @@ import java.util.Date;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
+import org.postgresql.copy.CopyDual;
+import org.postgresql.replication.LogSequenceNumber;
+import org.postgresql.replication.PGReplicationStream;
+import org.postgresql.replication.ReplicationType;
+import org.postgresql.util.GT;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
public class V3PGReplicationStream implements PGReplicationStream {
- private static final Logger LOGGER = Logger.getLogger(V3PGReplicationStream.class.getName());
- public static final long POSTGRES_EPOCH_2000_01_01 = 946684800000L;
- private static final long NANOS_PER_MILLISECOND = 1000000L;
+ public static final long POSTGRES_EPOCH_2000_01_01 = 946684800000L;
+ private static final Logger LOGGER = Logger.getLogger(V3PGReplicationStream.class.getName());
+ private static final long NANOS_PER_MILLISECOND = 1000000L;
- private final CopyDual copyDual;
- private final long updateInterval;
- private final ReplicationType replicationType;
- private long lastStatusUpdate;
- private boolean closeFlag;
+ private final CopyDual copyDual;
+ private final long updateInterval;
+ private final ReplicationType replicationType;
+ private long lastStatusUpdate;
+ private boolean closeFlag;
- private LogSequenceNumber lastServerLSN = LogSequenceNumber.INVALID_LSN;
- /**
- * Last receive LSN + payload size.
- */
- private volatile LogSequenceNumber lastReceiveLSN = LogSequenceNumber.INVALID_LSN;
- private volatile LogSequenceNumber lastAppliedLSN = LogSequenceNumber.INVALID_LSN;
- private volatile LogSequenceNumber lastFlushedLSN = LogSequenceNumber.INVALID_LSN;
- private volatile LogSequenceNumber startOfLastMessageLSN = LogSequenceNumber.INVALID_LSN;
- private volatile LogSequenceNumber explicitlyFlushedLSN = LogSequenceNumber.INVALID_LSN;
+ private LogSequenceNumber lastServerLSN = LogSequenceNumber.INVALID_LSN;
+ /**
+ * Last receive LSN + payload size.
+ */
+ private volatile LogSequenceNumber lastReceiveLSN = LogSequenceNumber.INVALID_LSN;
+ private volatile LogSequenceNumber lastAppliedLSN = LogSequenceNumber.INVALID_LSN;
+ private volatile LogSequenceNumber lastFlushedLSN = LogSequenceNumber.INVALID_LSN;
+ private volatile LogSequenceNumber startOfLastMessageLSN = LogSequenceNumber.INVALID_LSN;
+ private volatile LogSequenceNumber explicitlyFlushedLSN = LogSequenceNumber.INVALID_LSN;
- /**
- * @param copyDual bidirectional copy protocol
- * @param startLSN the position in the WAL that we want to initiate replication from
- * usually the currentLSN returned by calling pg_current_wal_lsn()for v10
- * above or pg_current_xlog_location() depending on the version of the
- * server
- * @param updateIntervalMs the number of millisecond between status packets sent back to the
- * server. A value of zero disables the periodic status updates
- * completely, although an update will still be sent when requested by the
- * server, to avoid timeout disconnect.
- * @param replicationType LOGICAL or PHYSICAL
- */
- public V3PGReplicationStream(CopyDual copyDual, LogSequenceNumber startLSN, long updateIntervalMs,
- ReplicationType replicationType
- ) {
- this.copyDual = copyDual;
- this.updateInterval = updateIntervalMs * NANOS_PER_MILLISECOND;
- this.lastStatusUpdate = System.nanoTime() - (updateIntervalMs * NANOS_PER_MILLISECOND);
- this.lastReceiveLSN = startLSN;
- this.replicationType = replicationType;
- }
-
- @Override
- public ByteBuffer read() throws SQLException {
- checkClose();
-
- ByteBuffer payload = null;
- while (payload == null && copyDual.isActive()) {
- payload = readInternal(true);
+ /**
+ * @param copyDual bidirectional copy protocol
+     * @param startLSN the position in the WAL that we want to initiate replication from,
+     *                         usually the current LSN returned by calling pg_current_wal_lsn() for
+     *                         v10 and above, or pg_current_xlog_location() depending on the
+     *                         version of the server
+     * @param updateIntervalMs the number of milliseconds between status packets sent back to the
+ * server. A value of zero disables the periodic status updates
+ * completely, although an update will still be sent when requested by the
+ * server, to avoid timeout disconnect.
+ * @param replicationType LOGICAL or PHYSICAL
+ */
+ public V3PGReplicationStream(CopyDual copyDual, LogSequenceNumber startLSN, long updateIntervalMs,
+ ReplicationType replicationType
+ ) {
+ this.copyDual = copyDual;
+ this.updateInterval = updateIntervalMs * NANOS_PER_MILLISECOND;
+ this.lastStatusUpdate = System.nanoTime() - (updateIntervalMs * NANOS_PER_MILLISECOND);
+ this.lastReceiveLSN = startLSN;
+ this.replicationType = replicationType;
}
- return payload;
- }
+ @Override
+ public ByteBuffer read() throws SQLException {
+ checkClose();
- @Override
- public ByteBuffer readPending() throws SQLException {
- checkClose();
- return readInternal(false);
- }
+ ByteBuffer payload = null;
+ while (payload == null && copyDual.isActive()) {
+ payload = readInternal(true);
+ }
- @Override
- public LogSequenceNumber getLastReceiveLSN() {
- return lastReceiveLSN;
- }
+ return payload;
+ }
- @Override
- public LogSequenceNumber getLastFlushedLSN() {
- return lastFlushedLSN;
- }
+ @Override
+ public ByteBuffer readPending() throws SQLException {
+ checkClose();
+ return readInternal(false);
+ }
- @Override
- public LogSequenceNumber getLastAppliedLSN() {
- return lastAppliedLSN;
- }
+ @Override
+ public LogSequenceNumber getLastReceiveLSN() {
+ return lastReceiveLSN;
+ }
- @Override
- public void setFlushedLSN(LogSequenceNumber flushed) {
- this.lastFlushedLSN = flushed;
- }
+ @Override
+ public LogSequenceNumber getLastFlushedLSN() {
+ return lastFlushedLSN;
+ }
- @Override
- public void setAppliedLSN(LogSequenceNumber applied) {
- this.lastAppliedLSN = applied;
- }
+ @Override
+ public LogSequenceNumber getLastAppliedLSN() {
+ return lastAppliedLSN;
+ }
- @Override
- public void forceUpdateStatus() throws SQLException {
- checkClose();
- updateStatusInternal(lastReceiveLSN, lastFlushedLSN, lastAppliedLSN, true);
- }
+ @Override
+ public void setFlushedLSN(LogSequenceNumber flushed) {
+ this.lastFlushedLSN = flushed;
+ }
- @Override
- public boolean isClosed() {
- return closeFlag || !copyDual.isActive();
- }
+ @Override
+ public void setAppliedLSN(LogSequenceNumber applied) {
+ this.lastAppliedLSN = applied;
+ }
- private ByteBuffer readInternal(boolean block) throws SQLException {
- boolean updateStatusRequired = false;
- while (copyDual.isActive()) {
+ @Override
+ public void forceUpdateStatus() throws SQLException {
+ checkClose();
+ updateStatusInternal(lastReceiveLSN, lastFlushedLSN, lastAppliedLSN, true);
+ }
- ByteBuffer buffer = receiveNextData(block);
+ @Override
+ public boolean isClosed() {
+ return closeFlag || !copyDual.isActive();
+ }
- if (updateStatusRequired || isTimeUpdate()) {
- timeUpdateStatus();
- }
+ private ByteBuffer readInternal(boolean block) throws SQLException {
+ boolean updateStatusRequired = false;
+ while (copyDual.isActive()) {
+
+ ByteBuffer buffer = receiveNextData(block);
+
+ if (updateStatusRequired || isTimeUpdate()) {
+ timeUpdateStatus();
+ }
+
+ if (buffer == null) {
+ return null;
+ }
+
+ int code = buffer.get();
+
+ switch (code) {
+
+ case 'k': //KeepAlive message
+ updateStatusRequired = processKeepAliveMessage(buffer);
+ updateStatusRequired |= updateInterval == 0;
+ break;
+
+ case 'w': //XLogData
+ return processXLogData(buffer);
+
+ default:
+ throw new PSQLException(
+ GT.tr("Unexpected packet type during replication: {0}", Integer.toString(code)),
+ PSQLState.PROTOCOL_VIOLATION
+ );
+ }
+ }
- if (buffer == null) {
return null;
- }
-
- int code = buffer.get();
-
- switch (code) {
-
- case 'k': //KeepAlive message
- updateStatusRequired = processKeepAliveMessage(buffer);
- updateStatusRequired |= updateInterval == 0;
- break;
-
- case 'w': //XLogData
- return processXLogData(buffer);
-
- default:
- throw new PSQLException(
- GT.tr("Unexpected packet type during replication: {0}", Integer.toString(code)),
- PSQLState.PROTOCOL_VIOLATION
- );
- }
}
- return null;
- }
+ private ByteBuffer receiveNextData(boolean block) throws SQLException {
+ try {
+ byte[] message = copyDual.readFromCopy(block);
+ if (message != null) {
+ return ByteBuffer.wrap(message);
+ } else {
+ return null;
+ }
+ } catch (PSQLException e) { //todo maybe replace on thread sleep?
+ if (e.getCause() instanceof SocketTimeoutException) {
+ //signal for keep alive
+ return null;
+ }
- private ByteBuffer receiveNextData(boolean block) throws SQLException {
- try {
- byte[] message = copyDual.readFromCopy(block);
- if (message != null) {
- return ByteBuffer.wrap(message);
- } else {
- return null;
- }
- } catch (PSQLException e) { //todo maybe replace on thread sleep?
- if (e.getCause() instanceof SocketTimeoutException) {
- //signal for keep alive
- return null;
- }
-
- throw e;
- }
- }
-
- private boolean isTimeUpdate() {
- /* a value of 0 disables automatic updates */
- if ( updateInterval == 0 ) {
- return false;
- }
- long diff = System.nanoTime() - lastStatusUpdate;
- return diff >= updateInterval;
- }
-
- private void timeUpdateStatus() throws SQLException {
- updateStatusInternal(lastReceiveLSN, lastFlushedLSN, lastAppliedLSN, false);
- }
-
- private void updateStatusInternal(
- LogSequenceNumber received, LogSequenceNumber flushed, LogSequenceNumber applied,
- boolean replyRequired)
- throws SQLException {
- byte[] reply = prepareUpdateStatus(received, flushed, applied, replyRequired);
- copyDual.writeToCopy(reply, 0, reply.length);
- copyDual.flushCopy();
-
- explicitlyFlushedLSN = flushed;
- lastStatusUpdate = System.nanoTime();
- }
-
- private byte[] prepareUpdateStatus(LogSequenceNumber received, LogSequenceNumber flushed,
- LogSequenceNumber applied, boolean replyRequired) {
- ByteBuffer byteBuffer = ByteBuffer.allocate(1 + 8 + 8 + 8 + 8 + 1);
-
- long now = System.nanoTime() / NANOS_PER_MILLISECOND;
- long systemClock = TimeUnit.MICROSECONDS.convert((now - POSTGRES_EPOCH_2000_01_01),
- TimeUnit.MICROSECONDS);
-
- if (LOGGER.isLoggable(Level.FINEST)) {
- LOGGER.log(Level.FINEST, " FE=> StandbyStatusUpdate(received: {0}, flushed: {1}, applied: {2}, clock: {3})",
- new Object[]{received.asString(), flushed.asString(), applied.asString(), new Date(now)});
+ throw e;
+ }
}
- byteBuffer.put((byte) 'r');
- byteBuffer.putLong(received.asLong());
- byteBuffer.putLong(flushed.asLong());
- byteBuffer.putLong(applied.asLong());
- byteBuffer.putLong(systemClock);
- if (replyRequired) {
- byteBuffer.put((byte) 1);
- } else {
- byteBuffer.put(received == LogSequenceNumber.INVALID_LSN ? (byte) 1 : (byte) 0);
+ private boolean isTimeUpdate() {
+ /* a value of 0 disables automatic updates */
+ if (updateInterval == 0) {
+ return false;
+ }
+ long diff = System.nanoTime() - lastStatusUpdate;
+ return diff >= updateInterval;
}
- lastStatusUpdate = now;
- return byteBuffer.array();
- }
-
- private boolean processKeepAliveMessage(ByteBuffer buffer) {
- lastServerLSN = LogSequenceNumber.valueOf(buffer.getLong());
- if (lastServerLSN.asLong() > lastReceiveLSN.asLong()) {
- lastReceiveLSN = lastServerLSN;
- }
- // if the client has confirmed flush of last XLogData msg and KeepAlive shows ServerLSN is still
- // advancing, we can safely advance FlushLSN to ServerLSN
- if (explicitlyFlushedLSN.asLong() >= startOfLastMessageLSN.asLong()
- && lastServerLSN.asLong() > explicitlyFlushedLSN.asLong()
- && lastServerLSN.asLong() > lastFlushedLSN.asLong()) {
- lastFlushedLSN = lastServerLSN;
+ private void timeUpdateStatus() throws SQLException {
+ updateStatusInternal(lastReceiveLSN, lastFlushedLSN, lastAppliedLSN, false);
}
- long lastServerClock = buffer.getLong();
+ private void updateStatusInternal(
+ LogSequenceNumber received, LogSequenceNumber flushed, LogSequenceNumber applied,
+ boolean replyRequired)
+ throws SQLException {
+ byte[] reply = prepareUpdateStatus(received, flushed, applied, replyRequired);
+ copyDual.writeToCopy(reply, 0, reply.length);
+ copyDual.flushCopy();
- boolean replyRequired = buffer.get() != 0;
-
- if (LOGGER.isLoggable(Level.FINEST)) {
- Date clockTime = new Date(
- TimeUnit.MILLISECONDS.convert(lastServerClock, TimeUnit.MICROSECONDS)
- + POSTGRES_EPOCH_2000_01_01);
- LOGGER.log(Level.FINEST, " <=BE Keepalive(lastServerWal: {0}, clock: {1} needReply: {2})",
- new Object[]{lastServerLSN.asString(), clockTime, replyRequired});
+ explicitlyFlushedLSN = flushed;
+ lastStatusUpdate = System.nanoTime();
}
- return replyRequired;
- }
+ private byte[] prepareUpdateStatus(LogSequenceNumber received, LogSequenceNumber flushed,
+ LogSequenceNumber applied, boolean replyRequired) {
+ ByteBuffer byteBuffer = ByteBuffer.allocate(1 + 8 + 8 + 8 + 8 + 1);
- private ByteBuffer processXLogData(ByteBuffer buffer) {
- long startLsn = buffer.getLong();
- startOfLastMessageLSN = LogSequenceNumber.valueOf(startLsn);
- lastServerLSN = LogSequenceNumber.valueOf(buffer.getLong());
- long systemClock = buffer.getLong();
+ long now = System.nanoTime() / NANOS_PER_MILLISECOND;
+ long systemClock = TimeUnit.MICROSECONDS.convert((now - POSTGRES_EPOCH_2000_01_01),
+ TimeUnit.MICROSECONDS);
- if (replicationType == ReplicationType.LOGICAL) {
- lastReceiveLSN = LogSequenceNumber.valueOf(startLsn);
- } else if (replicationType == ReplicationType.PHYSICAL) {
- int payloadSize = buffer.limit() - buffer.position();
- lastReceiveLSN = LogSequenceNumber.valueOf(startLsn + payloadSize);
+ if (LOGGER.isLoggable(Level.FINEST)) {
+ LOGGER.log(Level.FINEST, " FE=> StandbyStatusUpdate(received: {0}, flushed: {1}, applied: {2}, clock: {3})",
+ new Object[]{received.asString(), flushed.asString(), applied.asString(), new Date(now)});
+ }
+
+ byteBuffer.put((byte) 'r');
+ byteBuffer.putLong(received.asLong());
+ byteBuffer.putLong(flushed.asLong());
+ byteBuffer.putLong(applied.asLong());
+ byteBuffer.putLong(systemClock);
+ if (replyRequired) {
+ byteBuffer.put((byte) 1);
+ } else {
+ byteBuffer.put(received == LogSequenceNumber.INVALID_LSN ? (byte) 1 : (byte) 0);
+ }
+
+ lastStatusUpdate = now;
+ return byteBuffer.array();
}
- if (LOGGER.isLoggable(Level.FINEST)) {
- LOGGER.log(Level.FINEST, " <=BE XLogData(currWal: {0}, lastServerWal: {1}, clock: {2})",
- new Object[]{lastReceiveLSN.asString(), lastServerLSN.asString(), systemClock});
+ private boolean processKeepAliveMessage(ByteBuffer buffer) {
+ lastServerLSN = LogSequenceNumber.valueOf(buffer.getLong());
+ if (lastServerLSN.asLong() > lastReceiveLSN.asLong()) {
+ lastReceiveLSN = lastServerLSN;
+ }
+ // if the client has confirmed flush of last XLogData msg and KeepAlive shows ServerLSN is still
+ // advancing, we can safely advance FlushLSN to ServerLSN
+ if (explicitlyFlushedLSN.asLong() >= startOfLastMessageLSN.asLong()
+ && lastServerLSN.asLong() > explicitlyFlushedLSN.asLong()
+ && lastServerLSN.asLong() > lastFlushedLSN.asLong()) {
+ lastFlushedLSN = lastServerLSN;
+ }
+
+ long lastServerClock = buffer.getLong();
+
+ boolean replyRequired = buffer.get() != 0;
+
+ if (LOGGER.isLoggable(Level.FINEST)) {
+ Date clockTime = new Date(
+ TimeUnit.MILLISECONDS.convert(lastServerClock, TimeUnit.MICROSECONDS)
+ + POSTGRES_EPOCH_2000_01_01);
+ LOGGER.log(Level.FINEST, " <=BE Keepalive(lastServerWal: {0}, clock: {1} needReply: {2})",
+ new Object[]{lastServerLSN.asString(), clockTime, replyRequired});
+ }
+
+ return replyRequired;
}
- return buffer.slice();
- }
+ private ByteBuffer processXLogData(ByteBuffer buffer) {
+ long startLsn = buffer.getLong();
+ startOfLastMessageLSN = LogSequenceNumber.valueOf(startLsn);
+ lastServerLSN = LogSequenceNumber.valueOf(buffer.getLong());
+ long systemClock = buffer.getLong();
- private void checkClose() throws PSQLException {
- if (isClosed()) {
- throw new PSQLException(GT.tr("This replication stream has been closed."),
- PSQLState.CONNECTION_DOES_NOT_EXIST);
- }
- }
+ if (replicationType == ReplicationType.LOGICAL) {
+ lastReceiveLSN = LogSequenceNumber.valueOf(startLsn);
+ } else if (replicationType == ReplicationType.PHYSICAL) {
+ int payloadSize = buffer.limit() - buffer.position();
+ lastReceiveLSN = LogSequenceNumber.valueOf(startLsn + payloadSize);
+ }
- @Override
- public void close() throws SQLException {
- if (isClosed()) {
- return;
+ if (LOGGER.isLoggable(Level.FINEST)) {
+ LOGGER.log(Level.FINEST, " <=BE XLogData(currWal: {0}, lastServerWal: {1}, clock: {2})",
+ new Object[]{lastReceiveLSN.asString(), lastServerLSN.asString(), systemClock});
+ }
+
+ return buffer.slice();
}
- LOGGER.log(Level.FINEST, " FE=> StopReplication");
+ private void checkClose() throws PSQLException {
+ if (isClosed()) {
+ throw new PSQLException(GT.tr("This replication stream has been closed."),
+ PSQLState.CONNECTION_DOES_NOT_EXIST);
+ }
+ }
- copyDual.endCopy();
+ @Override
+ public void close() throws SQLException {
+ if (isClosed()) {
+ return;
+ }
- closeFlag = true;
- }
+ LOGGER.log(Level.FINEST, " FE=> StopReplication");
+
+ copyDual.endCopy();
+
+ closeFlag = true;
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/replication/V3ReplicationProtocol.java b/pgjdbc/src/main/java/org/postgresql/core/v3/replication/V3ReplicationProtocol.java
index c522447..176dd0d 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/replication/V3ReplicationProtocol.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/replication/V3ReplicationProtocol.java
@@ -5,6 +5,11 @@
package org.postgresql.core.v3.replication;
+import java.io.IOException;
+import java.sql.SQLException;
+import java.util.Properties;
+import java.util.logging.Level;
+import java.util.logging.Logger;
import org.postgresql.copy.CopyDual;
import org.postgresql.core.PGStream;
import org.postgresql.core.QueryExecutor;
@@ -18,124 +23,118 @@ import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
-import java.io.IOException;
-import java.sql.SQLException;
-import java.util.Properties;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-
public class V3ReplicationProtocol implements ReplicationProtocol {
- private static final Logger LOGGER = Logger.getLogger(V3ReplicationProtocol.class.getName());
- private final QueryExecutor queryExecutor;
- private final PGStream pgStream;
+ private static final Logger LOGGER = Logger.getLogger(V3ReplicationProtocol.class.getName());
+ private final QueryExecutor queryExecutor;
+ private final PGStream pgStream;
- public V3ReplicationProtocol(QueryExecutor queryExecutor, PGStream pgStream) {
- this.queryExecutor = queryExecutor;
- this.pgStream = pgStream;
- }
-
- @Override
- public PGReplicationStream startLogical(LogicalReplicationOptions options)
- throws SQLException {
-
- String query = createStartLogicalQuery(options);
- return initializeReplication(query, options, ReplicationType.LOGICAL);
- }
-
- @Override
- public PGReplicationStream startPhysical(PhysicalReplicationOptions options)
- throws SQLException {
-
- String query = createStartPhysicalQuery(options);
- return initializeReplication(query, options, ReplicationType.PHYSICAL);
- }
-
- private PGReplicationStream initializeReplication(String query, CommonOptions options,
- ReplicationType replicationType)
- throws SQLException {
- LOGGER.log(Level.FINEST, " FE=> StartReplication(query: {0})", query);
-
- configureSocketTimeout(options);
- CopyDual copyDual = (CopyDual) queryExecutor.startCopy(query, true);
-
- return new V3PGReplicationStream(
- copyDual,
- options.getStartLSNPosition(),
- options.getStatusInterval(),
- replicationType
- );
- }
-
- /**
- * START_REPLICATION [SLOT slot_name] [PHYSICAL] XXX/XXX.
- */
- private String createStartPhysicalQuery(PhysicalReplicationOptions options) {
- StringBuilder builder = new StringBuilder();
- builder.append("START_REPLICATION");
-
- if (options.getSlotName() != null) {
- builder.append(" SLOT ").append(options.getSlotName());
+ public V3ReplicationProtocol(QueryExecutor queryExecutor, PGStream pgStream) {
+ this.queryExecutor = queryExecutor;
+ this.pgStream = pgStream;
}
- builder.append(" PHYSICAL ").append(options.getStartLSNPosition().asString());
+ @Override
+ public PGReplicationStream startLogical(LogicalReplicationOptions options)
+ throws SQLException {
- return builder.toString();
- }
-
- /**
- * START_REPLICATION SLOT slot_name LOGICAL XXX/XXX [ ( option_name [option_value] [, ... ] ) ]
- */
- private String createStartLogicalQuery(LogicalReplicationOptions options) {
- StringBuilder builder = new StringBuilder();
- builder.append("START_REPLICATION SLOT ")
- .append(options.getSlotName())
- .append(" LOGICAL ")
- .append(options.getStartLSNPosition().asString());
-
- Properties slotOptions = options.getSlotOptions();
- if (slotOptions.isEmpty()) {
- return builder.toString();
+ String query = createStartLogicalQuery(options);
+ return initializeReplication(query, options, ReplicationType.LOGICAL);
}
- //todo replace on java 8
- builder.append(" (");
- boolean isFirst = true;
- for (String name : slotOptions.stringPropertyNames()) {
- if (isFirst) {
- isFirst = false;
- } else {
- builder.append(", ");
- }
- builder.append('\"').append(name).append('\"').append(" ")
- .append('\'').append(slotOptions.getProperty(name)).append('\'');
- }
- builder.append(")");
+ @Override
+ public PGReplicationStream startPhysical(PhysicalReplicationOptions options)
+ throws SQLException {
- return builder.toString();
- }
-
- private void configureSocketTimeout(CommonOptions options) throws PSQLException {
- if (options.getStatusInterval() == 0) {
- return;
+ String query = createStartPhysicalQuery(options);
+ return initializeReplication(query, options, ReplicationType.PHYSICAL);
}
- try {
- int previousTimeOut = pgStream.getSocket().getSoTimeout();
+ private PGReplicationStream initializeReplication(String query, CommonOptions options,
+ ReplicationType replicationType)
+ throws SQLException {
+ LOGGER.log(Level.FINEST, " FE=> StartReplication(query: {0})", query);
- int minimalTimeOut;
- if (previousTimeOut > 0) {
- minimalTimeOut = Math.min(previousTimeOut, options.getStatusInterval());
- } else {
- minimalTimeOut = options.getStatusInterval();
- }
+ configureSocketTimeout(options);
+ CopyDual copyDual = (CopyDual) queryExecutor.startCopy(query, true);
- pgStream.getSocket().setSoTimeout(minimalTimeOut);
- // Use blocking 1ms reads for `available()` checks
- pgStream.setMinStreamAvailableCheckDelay(0);
- } catch (IOException ioe) {
- throw new PSQLException(GT.tr("The connection attempt failed."),
- PSQLState.CONNECTION_UNABLE_TO_CONNECT, ioe);
+ return new V3PGReplicationStream(
+ copyDual,
+ options.getStartLSNPosition(),
+ options.getStatusInterval(),
+ replicationType
+ );
+ }
+
+ /**
+ * START_REPLICATION [SLOT slot_name] [PHYSICAL] XXX/XXX.
+ */
+ private String createStartPhysicalQuery(PhysicalReplicationOptions options) {
+ StringBuilder builder = new StringBuilder();
+ builder.append("START_REPLICATION");
+
+ if (options.getSlotName() != null) {
+ builder.append(" SLOT ").append(options.getSlotName());
+ }
+
+ builder.append(" PHYSICAL ").append(options.getStartLSNPosition().asString());
+
+ return builder.toString();
+ }
+
+ /**
+ * START_REPLICATION SLOT slot_name LOGICAL XXX/XXX [ ( option_name [option_value] [, ... ] ) ]
+ */
+ private String createStartLogicalQuery(LogicalReplicationOptions options) {
+ StringBuilder builder = new StringBuilder();
+ builder.append("START_REPLICATION SLOT ")
+ .append(options.getSlotName())
+ .append(" LOGICAL ")
+ .append(options.getStartLSNPosition().asString());
+
+ Properties slotOptions = options.getSlotOptions();
+ if (slotOptions.isEmpty()) {
+ return builder.toString();
+ }
+
+ //todo replace on java 8
+ builder.append(" (");
+ boolean isFirst = true;
+ for (String name : slotOptions.stringPropertyNames()) {
+ if (isFirst) {
+ isFirst = false;
+ } else {
+ builder.append(", ");
+ }
+ builder.append('\"').append(name).append('\"').append(" ")
+ .append('\'').append(slotOptions.getProperty(name)).append('\'');
+ }
+ builder.append(")");
+
+ return builder.toString();
+ }
+
+ private void configureSocketTimeout(CommonOptions options) throws PSQLException {
+ if (options.getStatusInterval() == 0) {
+ return;
+ }
+
+ try {
+ int previousTimeOut = pgStream.getSocket().getSoTimeout();
+
+ int minimalTimeOut;
+ if (previousTimeOut > 0) {
+ minimalTimeOut = Math.min(previousTimeOut, options.getStatusInterval());
+ } else {
+ minimalTimeOut = options.getStatusInterval();
+ }
+
+ pgStream.getSocket().setSoTimeout(minimalTimeOut);
+ // Use blocking 1ms reads for `available()` checks
+ pgStream.setMinStreamAvailableCheckDelay(0);
+ } catch (IOException ioe) {
+ throw new PSQLException(GT.tr("The connection attempt failed."),
+ PSQLState.CONNECTION_UNABLE_TO_CONNECT, ioe);
+ }
}
- }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/ds/PGConnectionPoolDataSource.java b/pgjdbc/src/main/java/org/postgresql/ds/PGConnectionPoolDataSource.java
index 44edf9a..9ccef55 100644
--- a/pgjdbc/src/main/java/org/postgresql/ds/PGConnectionPoolDataSource.java
+++ b/pgjdbc/src/main/java/org/postgresql/ds/PGConnectionPoolDataSource.java
@@ -5,17 +5,15 @@
package org.postgresql.ds;
-import org.postgresql.ds.common.BaseDataSource;
-import org.postgresql.util.DriverInfo;
-
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.sql.SQLException;
-
import javax.sql.ConnectionPoolDataSource;
import javax.sql.PooledConnection;
+import org.postgresql.ds.common.BaseDataSource;
+import org.postgresql.util.DriverInfo;
/**
* PostgreSQL implementation of ConnectionPoolDataSource. The app server or middleware vendor should
@@ -39,68 +37,68 @@ import javax.sql.PooledConnection;
*/
@SuppressWarnings("serial")
public class PGConnectionPoolDataSource extends BaseDataSource
- implements ConnectionPoolDataSource, Serializable {
- private boolean defaultAutoCommit = true;
+ implements ConnectionPoolDataSource, Serializable {
+ private boolean defaultAutoCommit = true;
- /**
- * Gets a description of this DataSource.
- */
- @Override
- public String getDescription() {
- return "ConnectionPoolDataSource from " + DriverInfo.DRIVER_FULL_NAME;
- }
+ /**
+ * Gets a description of this DataSource.
+ */
+ @Override
+ public String getDescription() {
+ return "ConnectionPoolDataSource from " + DriverInfo.DRIVER_FULL_NAME;
+ }
- /**
- * Gets a connection which may be pooled by the app server or middleware implementation of
- * DataSource.
- *
- * @throws java.sql.SQLException Occurs when the physical database connection cannot be
- * established.
- */
- @Override
- public PooledConnection getPooledConnection() throws SQLException {
- return new PGPooledConnection(getConnection(), defaultAutoCommit);
- }
+ /**
+ * Gets a connection which may be pooled by the app server or middleware implementation of
+ * DataSource.
+ *
+ * @throws java.sql.SQLException Occurs when the physical database connection cannot be
+ * established.
+ */
+ @Override
+ public PooledConnection getPooledConnection() throws SQLException {
+ return new PGPooledConnection(getConnection(), defaultAutoCommit);
+ }
- /**
- * Gets a connection which may be pooled by the app server or middleware implementation of
- * DataSource.
- *
- * @throws java.sql.SQLException Occurs when the physical database connection cannot be
- * established.
- */
- @Override
- public PooledConnection getPooledConnection(String user, String password) throws SQLException {
- return new PGPooledConnection(getConnection(user, password), defaultAutoCommit);
- }
+ /**
+ * Gets a connection which may be pooled by the app server or middleware implementation of
+ * DataSource.
+ *
+ * @throws java.sql.SQLException Occurs when the physical database connection cannot be
+ * established.
+ */
+ @Override
+ public PooledConnection getPooledConnection(String user, String password) throws SQLException {
+ return new PGPooledConnection(getConnection(user, password), defaultAutoCommit);
+ }
- /**
- * Gets whether connections supplied by this pool will have autoCommit turned on by default. The
- * default value is {@code true}, so that autoCommit will be turned on by default.
- *
- * @return true if connections supplied by this pool will have autoCommit
- */
- public boolean isDefaultAutoCommit() {
- return defaultAutoCommit;
- }
+ /**
+ * Gets whether connections supplied by this pool will have autoCommit turned on by default. The
+ * default value is {@code true}, so that autoCommit will be turned on by default.
+ *
+ * @return true if connections supplied by this pool will have autoCommit
+ */
+ public boolean isDefaultAutoCommit() {
+ return defaultAutoCommit;
+ }
- /**
- * Sets whether connections supplied by this pool will have autoCommit turned on by default. The
- * default value is {@code true}, so that autoCommit will be turned on by default.
- *
- * @param defaultAutoCommit whether connections supplied by this pool will have autoCommit
- */
- public void setDefaultAutoCommit(boolean defaultAutoCommit) {
- this.defaultAutoCommit = defaultAutoCommit;
- }
+ /**
+ * Sets whether connections supplied by this pool will have autoCommit turned on by default. The
+ * default value is {@code true}, so that autoCommit will be turned on by default.
+ *
+ * @param defaultAutoCommit whether connections supplied by this pool will have autoCommit
+ */
+ public void setDefaultAutoCommit(boolean defaultAutoCommit) {
+ this.defaultAutoCommit = defaultAutoCommit;
+ }
- private void writeObject(ObjectOutputStream out) throws IOException {
- writeBaseObject(out);
- out.writeBoolean(defaultAutoCommit);
- }
+ private void writeObject(ObjectOutputStream out) throws IOException {
+ writeBaseObject(out);
+ out.writeBoolean(defaultAutoCommit);
+ }
- private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
- readBaseObject(in);
- defaultAutoCommit = in.readBoolean();
- }
+ private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
+ readBaseObject(in);
+ defaultAutoCommit = in.readBoolean();
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/ds/PGPooledConnection.java b/pgjdbc/src/main/java/org/postgresql/ds/PGPooledConnection.java
index 147e7bc..c4bc809 100644
--- a/pgjdbc/src/main/java/org/postgresql/ds/PGPooledConnection.java
+++ b/pgjdbc/src/main/java/org/postgresql/ds/PGPooledConnection.java
@@ -5,12 +5,6 @@
package org.postgresql.ds;
-import org.postgresql.PGConnection;
-import org.postgresql.PGStatement;
-import org.postgresql.util.GT;
-import org.postgresql.util.PSQLException;
-import org.postgresql.util.PSQLState;
-
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
@@ -22,11 +16,15 @@ import java.sql.SQLException;
import java.sql.Statement;
import java.util.LinkedList;
import java.util.List;
-
import javax.sql.ConnectionEvent;
import javax.sql.ConnectionEventListener;
import javax.sql.PooledConnection;
import javax.sql.StatementEventListener;
+import org.postgresql.PGConnection;
+import org.postgresql.PGStatement;
+import org.postgresql.util.GT;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
/**
* PostgreSQL implementation of the PooledConnection interface. This shouldn't be used directly, as
@@ -38,426 +36,425 @@ import javax.sql.StatementEventListener;
*/
@SuppressWarnings("rawtypes")
public class PGPooledConnection implements PooledConnection {
- private final List listeners = new LinkedList<>();
- private Connection con;
- private ConnectionHandler last;
- private final boolean autoCommit;
- private final boolean isXA;
+ // Classes we consider fatal.
+ private static final String[] fatalClasses = {
+ "08", // connection error
+ "53", // insufficient resources
- /**
- * Creates a new PooledConnection representing the specified physical connection.
- *
- * @param con connection
- * @param autoCommit whether to autocommit
- * @param isXA whether connection is a XA connection
- */
- public PGPooledConnection(Connection con, boolean autoCommit, boolean isXA) {
- this.con = con;
- this.autoCommit = autoCommit;
- this.isXA = isXA;
- }
+ // nb: not just "57" as that includes query cancel which is nonfatal
+ "57P01", // admin shutdown
+ "57P02", // crash shutdown
+ "57P03", // cannot connect now
- public PGPooledConnection(Connection con, boolean autoCommit) {
- this(con, autoCommit, false);
- }
-
- /**
- * Adds a listener for close or fatal error events on the connection handed out to a client.
- */
- @Override
- public void addConnectionEventListener(ConnectionEventListener connectionEventListener) {
- listeners.add(connectionEventListener);
- }
-
- /**
- * Removes a listener for close or fatal error events on the connection handed out to a client.
- */
- @Override
- public void removeConnectionEventListener(ConnectionEventListener connectionEventListener) {
- listeners.remove(connectionEventListener);
- }
-
- /**
- * Closes the physical database connection represented by this PooledConnection. If any client has
- * a connection based on this PooledConnection, it is forcibly closed as well.
- */
- @Override
- public void close() throws SQLException {
- Connection con = this.con;
- ConnectionHandler last = this.last;
- if (last != null) {
- last.close();
- if (con != null && !con.isClosed()) {
- if (!con.getAutoCommit()) {
- try {
- con.rollback();
- } catch (SQLException ignored) {
- }
- }
- }
- }
- if (con == null) {
- return;
- }
- try {
- con.close();
- } finally {
- this.con = null;
- }
- }
-
- /**
- * Gets a handle for a client to use. This is a wrapper around the physical connection, so the
- * client can call close and it will just return the connection to the pool without really closing
- * the physical connection.
- *
- *
- * According to the JDBC 2.0 Optional Package spec (6.2.3), only one client may have an active
- * handle to the connection at a time, so if there is a previous handle active when this is
- * called, the previous one is forcibly closed and its work rolled back.
- *
- */
- @Override
- public Connection getConnection() throws SQLException {
- Connection con = this.con;
- if (con == null) {
- // Before throwing the exception, let's notify the registered listeners about the error
- PSQLException sqlException =
- new PSQLException(GT.tr("This PooledConnection has already been closed."),
- PSQLState.CONNECTION_DOES_NOT_EXIST);
- fireConnectionFatalError(sqlException);
- throw sqlException;
- }
- // If any error occurs while opening a new connection, the listeners
- // have to be notified. This gives a chance to connection pools to
- // eliminate bad pooled connections.
- try {
- // Only one connection can be open at a time from this PooledConnection. See JDBC 2.0 Optional
- // Package spec section 6.2.3
- ConnectionHandler last = this.last;
- if (last != null) {
- last.close();
- if (con != null) {
- if (!con.getAutoCommit()) {
- try {
- con.rollback();
- } catch (SQLException ignored) {
- }
- }
- con.clearWarnings();
- }
- }
- /*
- * In XA-mode, autocommit is handled in PGXAConnection, because it depends on whether an
- * XA-transaction is open or not
- */
- if (!isXA && con != null) {
- con.setAutoCommit(autoCommit);
- }
- } catch (SQLException sqlException) {
- fireConnectionFatalError(sqlException);
- throw (SQLException) sqlException.fillInStackTrace();
- }
- ConnectionHandler handler = new ConnectionHandler(con);
- last = handler;
-
- Connection proxyCon = (Connection) Proxy.newProxyInstance(getClass().getClassLoader(),
- new Class[]{Connection.class, PGConnection.class}, handler);
- handler.setProxy(proxyCon);
- return proxyCon;
- }
-
- /**
- * Used to fire a connection closed event to all listeners.
- */
- void fireConnectionClosed() {
- ConnectionEvent evt = null;
- // Copy the listener list so the listener can remove itself during this method call
- ConnectionEventListener[] local =
- listeners.toArray(new ConnectionEventListener[0]);
- for (ConnectionEventListener listener : local) {
- if (evt == null) {
- evt = createConnectionEvent(null);
- }
- listener.connectionClosed(evt);
- }
- }
-
- /**
- * Used to fire a connection error event to all listeners.
- */
- void fireConnectionFatalError(SQLException e) {
- ConnectionEvent evt = null;
- // Copy the listener list so the listener can remove itself during this method call
- ConnectionEventListener[] local =
- listeners.toArray(new ConnectionEventListener[0]);
- for (ConnectionEventListener listener : local) {
- if (evt == null) {
- evt = createConnectionEvent(e);
- }
- listener.connectionErrorOccurred(evt);
- }
- }
-
- protected ConnectionEvent createConnectionEvent(SQLException e) {
- return e == null ? new ConnectionEvent(this) : new ConnectionEvent(this, e);
- }
-
- // Classes we consider fatal.
- private static final String[] fatalClasses = {
- "08", // connection error
- "53", // insufficient resources
-
- // nb: not just "57" as that includes query cancel which is nonfatal
- "57P01", // admin shutdown
- "57P02", // crash shutdown
- "57P03", // cannot connect now
-
- "58", // system error (backend)
- "60", // system error (driver)
- "99", // unexpected error
- "F0", // configuration file error (backend)
- "XX", // internal error (backend)
- };
-
- private static boolean isFatalState(String state) {
- if (state == null) {
- // no info, assume fatal
- return true;
- }
- if (state.length() < 2) {
- // no class info, assume fatal
- return true;
- }
-
- for (String fatalClass : fatalClasses) {
- if (state.startsWith(fatalClass)) {
- return true; // fatal
- }
- }
-
- return false;
- }
-
- /**
- * Fires a connection error event, but only if we think the exception is fatal.
- *
- * @param e the SQLException to consider
- */
- private void fireConnectionError(SQLException e) {
- if (!isFatalState(e.getSQLState())) {
- return;
- }
-
- fireConnectionFatalError(e);
- }
-
- /**
- * Instead of declaring a class implementing Connection, which would have to be updated for every
- * JDK rev, use a dynamic proxy to handle all calls through the Connection interface. This is the
- * part that requires JDK 1.3 or higher, though JDK 1.2 could be supported with a 3rd-party proxy
- * package.
- */
- private class ConnectionHandler implements InvocationHandler {
+ "58", // system error (backend)
+ "60", // system error (driver)
+ "99", // unexpected error
+ "F0", // configuration file error (backend)
+ "XX", // internal error (backend)
+ };
+ private final List listeners = new LinkedList<>();
+ private final boolean autoCommit;
+ private final boolean isXA;
private Connection con;
- private Connection proxy; // the Connection the client is currently using, which is a proxy
- private boolean automatic;
+ private ConnectionHandler last;
- ConnectionHandler(Connection con) {
- this.con = con;
+ /**
+ * Creates a new PooledConnection representing the specified physical connection.
+ *
+ * @param con connection
+ * @param autoCommit whether to autocommit
+ * @param isXA whether connection is a XA connection
+ */
+ public PGPooledConnection(Connection con, boolean autoCommit, boolean isXA) {
+ this.con = con;
+ this.autoCommit = autoCommit;
+ this.isXA = isXA;
}
+ public PGPooledConnection(Connection con, boolean autoCommit) {
+ this(con, autoCommit, false);
+ }
+
+ private static boolean isFatalState(String state) {
+ if (state == null) {
+ // no info, assume fatal
+ return true;
+ }
+ if (state.length() < 2) {
+ // no class info, assume fatal
+ return true;
+ }
+
+ for (String fatalClass : fatalClasses) {
+ if (state.startsWith(fatalClass)) {
+ return true; // fatal
+ }
+ }
+
+ return false;
+ }
+
+ /**
+ * Adds a listener for close or fatal error events on the connection handed out to a client.
+ */
@Override
- @SuppressWarnings("throwing.nullable")
- public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
- final String methodName = method.getName();
- // From Object
- if (method.getDeclaringClass() == Object.class) {
- if ("toString".equals(methodName)) {
- return "Pooled connection wrapping physical connection " + con;
+ public void addConnectionEventListener(ConnectionEventListener connectionEventListener) {
+ listeners.add(connectionEventListener);
+ }
+
+ /**
+ * Removes a listener for close or fatal error events on the connection handed out to a client.
+ */
+ @Override
+ public void removeConnectionEventListener(ConnectionEventListener connectionEventListener) {
+ listeners.remove(connectionEventListener);
+ }
+
+ /**
+ * Closes the physical database connection represented by this PooledConnection. If any client has
+ * a connection based on this PooledConnection, it is forcibly closed as well.
+ */
+ @Override
+ public void close() throws SQLException {
+ Connection con = this.con;
+ ConnectionHandler last = this.last;
+ if (last != null) {
+ last.close();
+ if (con != null && !con.isClosed()) {
+ if (!con.getAutoCommit()) {
+ try {
+ con.rollback();
+ } catch (SQLException ignored) {
+ }
+ }
+ }
}
- if ("equals".equals(methodName)) {
- return proxy == args[0];
- }
- if ("hashCode".equals(methodName)) {
- return System.identityHashCode(proxy);
+ if (con == null) {
+ return;
}
try {
- return method.invoke(con, args);
- } catch (InvocationTargetException e) {
- // throwing.nullable
- throw e.getTargetException();
+ con.close();
+ } finally {
+ this.con = null;
}
- }
+ }
- // All the rest is from the Connection or PGConnection interface
- Connection con = this.con;
- if ("isClosed".equals(methodName)) {
- return con == null || con.isClosed();
- }
- if ("close".equals(methodName)) {
- // we are already closed and a double close
- // is not an error.
+ /**
+ * Gets a handle for a client to use. This is a wrapper around the physical connection, so the
+ * client can call close and it will just return the connection to the pool without really closing
+ * the physical connection.
+ *
+ *
+ * According to the JDBC 2.0 Optional Package spec (6.2.3), only one client may have an active
+ * handle to the connection at a time, so if there is a previous handle active when this is
+ * called, the previous one is forcibly closed and its work rolled back.
+ *
+ */
+ @Override
+ public Connection getConnection() throws SQLException {
+ Connection con = this.con;
if (con == null) {
- return null;
+ // Before throwing the exception, let's notify the registered listeners about the error
+ PSQLException sqlException =
+ new PSQLException(GT.tr("This PooledConnection has already been closed."),
+ PSQLState.CONNECTION_DOES_NOT_EXIST);
+ fireConnectionFatalError(sqlException);
+ throw sqlException;
}
-
- SQLException ex = null;
- if (!con.isClosed()) {
- if (!isXA && !con.getAutoCommit()) {
- try {
- con.rollback();
- } catch (SQLException e) {
- ex = e;
+ // If any error occurs while opening a new connection, the listeners
+ // have to be notified. This gives a chance to connection pools to
+ // eliminate bad pooled connections.
+ try {
+ // Only one connection can be open at a time from this PooledConnection. See JDBC 2.0 Optional
+ // Package spec section 6.2.3
+ ConnectionHandler last = this.last;
+ if (last != null) {
+ last.close();
+ if (con != null) {
+ if (!con.getAutoCommit()) {
+ try {
+ con.rollback();
+ } catch (SQLException ignored) {
+ }
+ }
+ con.clearWarnings();
+ }
}
- }
- con.clearWarnings();
+ /*
+ * In XA-mode, autocommit is handled in PGXAConnection, because it depends on whether an
+ * XA-transaction is open or not
+ */
+ if (!isXA && con != null) {
+ con.setAutoCommit(autoCommit);
+ }
+ } catch (SQLException sqlException) {
+ fireConnectionFatalError(sqlException);
+ throw (SQLException) sqlException.fillInStackTrace();
}
- this.con = null;
- this.proxy = null;
- last = null;
- fireConnectionClosed();
- if (ex != null) {
- throw ex;
+ ConnectionHandler handler = new ConnectionHandler(con);
+ last = handler;
+
+ Connection proxyCon = (Connection) Proxy.newProxyInstance(getClass().getClassLoader(),
+ new Class[]{Connection.class, PGConnection.class}, handler);
+ handler.setProxy(proxyCon);
+ return proxyCon;
+ }
+
+ /**
+ * Used to fire a connection closed event to all listeners.
+ */
+ void fireConnectionClosed() {
+ ConnectionEvent evt = null;
+ // Copy the listener list so the listener can remove itself during this method call
+ ConnectionEventListener[] local =
+ listeners.toArray(new ConnectionEventListener[0]);
+ for (ConnectionEventListener listener : local) {
+ if (evt == null) {
+ evt = createConnectionEvent(null);
+ }
+ listener.connectionClosed(evt);
}
- return null;
- }
- if (con == null || con.isClosed()) {
- throw new PSQLException(automatic
- ? GT.tr(
- "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed.")
- : GT.tr("Connection has been closed."), PSQLState.CONNECTION_DOES_NOT_EXIST);
- }
+ }
- // From here on in, we invoke via reflection, catch exceptions,
- // and check if they're fatal before rethrowing.
- try {
- if ("createStatement".equals(methodName)) {
- Statement st = (Statement) method.invoke(con, args);
- return Proxy.newProxyInstance(getClass().getClassLoader(),
- new Class[]{Statement.class, PGStatement.class},
- new StatementHandler(this, st));
- } else if ("prepareCall".equals(methodName)) {
- Statement st = (Statement) method.invoke(con, args);
- return Proxy.newProxyInstance(getClass().getClassLoader(),
- new Class[]{CallableStatement.class, PGStatement.class},
- new StatementHandler(this, st));
- } else if ("prepareStatement".equals(methodName)) {
- Statement st = (Statement) method.invoke(con, args);
- return Proxy.newProxyInstance(getClass().getClassLoader(),
- new Class[]{PreparedStatement.class, PGStatement.class},
- new StatementHandler(this, st));
- } else {
- return method.invoke(con, args);
+ /**
+ * Used to fire a connection error event to all listeners.
+ */
+ void fireConnectionFatalError(SQLException e) {
+ ConnectionEvent evt = null;
+ // Copy the listener list so the listener can remove itself during this method call
+ ConnectionEventListener[] local =
+ listeners.toArray(new ConnectionEventListener[0]);
+ for (ConnectionEventListener listener : local) {
+ if (evt == null) {
+ evt = createConnectionEvent(e);
+ }
+ listener.connectionErrorOccurred(evt);
}
- } catch (final InvocationTargetException ite) {
- final Throwable te = ite.getTargetException();
- if (te instanceof SQLException) {
- fireConnectionError((SQLException) te); // Tell listeners about exception if it's fatal
+ }
+
+ protected ConnectionEvent createConnectionEvent(SQLException e) {
+ return e == null ? new ConnectionEvent(this) : new ConnectionEvent(this, e);
+ }
+
+ /**
+ * Fires a connection error event, but only if we think the exception is fatal.
+ *
+ * @param e the SQLException to consider
+ */
+ private void fireConnectionError(SQLException e) {
+ if (!isFatalState(e.getSQLState())) {
+ return;
}
- throw te;
- }
- }
- Connection getProxy() {
- return proxy;
- }
-
- void setProxy(Connection proxy) {
- this.proxy = proxy;
- }
-
- public void close() {
- if (con != null) {
- automatic = true;
- }
- con = null;
- proxy = null;
- // No close event fired here: see JDBC 2.0 Optional Package spec section 6.3
- }
-
- public boolean isClosed() {
- return con == null;
- }
- }
-
- /**
- *
Instead of declaring classes implementing Statement, PreparedStatement, and CallableStatement,
- * which would have to be updated for every JDK rev, use a dynamic proxy to handle all calls
- * through the Statement interfaces. This is the part that requires JDK 1.3 or higher, though JDK
- * 1.2 could be supported with a 3rd-party proxy package.
- *
- *
The StatementHandler is required in order to return the proper Connection proxy for the
- * getConnection method.
- */
- private class StatementHandler implements InvocationHandler {
- private ConnectionHandler con;
- private Statement st;
-
- StatementHandler(ConnectionHandler con, Statement st) {
- this.con = con;
- this.st = st;
+ fireConnectionFatalError(e);
}
@Override
- @SuppressWarnings("throwing.nullable")
- public Object invoke(Object proxy, Method method, Object[] args)
- throws Throwable {
- final String methodName = method.getName();
- // From Object
- if (method.getDeclaringClass() == Object.class) {
- if ("toString".equals(methodName)) {
- return "Pooled statement wrapping physical statement " + st;
- }
- if ("hashCode".equals(methodName)) {
- return System.identityHashCode(proxy);
- }
- if ("equals".equals(methodName)) {
- return proxy == args[0];
- }
- return method.invoke(st, args);
- }
-
- Statement st = this.st;
- // All the rest is from the Statement interface
- if ("isClosed".equals(methodName)) {
- return st == null || st.isClosed();
- }
- if ("close".equals(methodName)) {
- if (st == null || st.isClosed()) {
- return null;
- }
- con = null;
- this.st = null;
- st.close();
- return null;
- }
- if (st == null || st.isClosed()) {
- throw new PSQLException(GT.tr("Statement has been closed."), PSQLState.OBJECT_NOT_IN_STATE);
- }
- if ("getConnection".equals(methodName)) {
- return con.getProxy(); // the proxied connection, not a physical connection
- }
-
- // Delegate the call to the proxied Statement.
- try {
- return method.invoke(st, args);
- } catch (final InvocationTargetException ite) {
- final Throwable te = ite.getTargetException();
- if (te instanceof SQLException) {
- fireConnectionError((SQLException) te); // Tell listeners about exception if it's fatal
- }
- throw te;
- }
+ public void removeStatementEventListener(StatementEventListener listener) {
}
- }
- @Override
- public void removeStatementEventListener(StatementEventListener listener) {
- }
+ @Override
+ public void addStatementEventListener(StatementEventListener listener) {
+ }
- @Override
- public void addStatementEventListener(StatementEventListener listener) {
- }
+ /**
+ * Instead of declaring a class implementing Connection, which would have to be updated for every
+ * JDK rev, use a dynamic proxy to handle all calls through the Connection interface. This is the
+ * part that requires JDK 1.3 or higher, though JDK 1.2 could be supported with a 3rd-party proxy
+ * package.
+ */
+ private class ConnectionHandler implements InvocationHandler {
+ private Connection con;
+ private Connection proxy; // the Connection the client is currently using, which is a proxy
+ private boolean automatic;
+
+ ConnectionHandler(Connection con) {
+ this.con = con;
+ }
+
+ @Override
+ @SuppressWarnings("throwing.nullable")
+ public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
+ final String methodName = method.getName();
+ // From Object
+ if (method.getDeclaringClass() == Object.class) {
+ if ("toString".equals(methodName)) {
+ return "Pooled connection wrapping physical connection " + con;
+ }
+ if ("equals".equals(methodName)) {
+ return proxy == args[0];
+ }
+ if ("hashCode".equals(methodName)) {
+ return System.identityHashCode(proxy);
+ }
+ try {
+ return method.invoke(con, args);
+ } catch (InvocationTargetException e) {
+ // throwing.nullable
+ throw e.getTargetException();
+ }
+ }
+
+ // All the rest is from the Connection or PGConnection interface
+ Connection con = this.con;
+ if ("isClosed".equals(methodName)) {
+ return con == null || con.isClosed();
+ }
+ if ("close".equals(methodName)) {
+ // we are already closed and a double close
+ // is not an error.
+ if (con == null) {
+ return null;
+ }
+
+ SQLException ex = null;
+ if (!con.isClosed()) {
+ if (!isXA && !con.getAutoCommit()) {
+ try {
+ con.rollback();
+ } catch (SQLException e) {
+ ex = e;
+ }
+ }
+ con.clearWarnings();
+ }
+ this.con = null;
+ this.proxy = null;
+ last = null;
+ fireConnectionClosed();
+ if (ex != null) {
+ throw ex;
+ }
+ return null;
+ }
+ if (con == null || con.isClosed()) {
+ throw new PSQLException(automatic
+ ? GT.tr(
+ "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed.")
+ : GT.tr("Connection has been closed."), PSQLState.CONNECTION_DOES_NOT_EXIST);
+ }
+
+ // From here on in, we invoke via reflection, catch exceptions,
+ // and check if they're fatal before rethrowing.
+ try {
+ if ("createStatement".equals(methodName)) {
+ Statement st = (Statement) method.invoke(con, args);
+ return Proxy.newProxyInstance(getClass().getClassLoader(),
+ new Class[]{Statement.class, PGStatement.class},
+ new StatementHandler(this, st));
+ } else if ("prepareCall".equals(methodName)) {
+ Statement st = (Statement) method.invoke(con, args);
+ return Proxy.newProxyInstance(getClass().getClassLoader(),
+ new Class[]{CallableStatement.class, PGStatement.class},
+ new StatementHandler(this, st));
+ } else if ("prepareStatement".equals(methodName)) {
+ Statement st = (Statement) method.invoke(con, args);
+ return Proxy.newProxyInstance(getClass().getClassLoader(),
+ new Class[]{PreparedStatement.class, PGStatement.class},
+ new StatementHandler(this, st));
+ } else {
+ return method.invoke(con, args);
+ }
+ } catch (final InvocationTargetException ite) {
+ final Throwable te = ite.getTargetException();
+ if (te instanceof SQLException) {
+ fireConnectionError((SQLException) te); // Tell listeners about exception if it's fatal
+ }
+ throw te;
+ }
+ }
+
+ Connection getProxy() {
+ return proxy;
+ }
+
+ void setProxy(Connection proxy) {
+ this.proxy = proxy;
+ }
+
+ public void close() {
+ if (con != null) {
+ automatic = true;
+ }
+ con = null;
+ proxy = null;
+ // No close event fired here: see JDBC 2.0 Optional Package spec section 6.3
+ }
+
+ public boolean isClosed() {
+ return con == null;
+ }
+ }
+
+ /**
+ * <p>Instead of declaring classes implementing Statement, PreparedStatement, and CallableStatement,
+ * which would have to be updated for every JDK rev, use a dynamic proxy to handle all calls
+ * through the Statement interfaces. This is the part that requires JDK 1.3 or higher, though JDK
+ * 1.2 could be supported with a 3rd-party proxy package.
+ *
+ * <p>The StatementHandler is required in order to return the proper Connection proxy for the
+ * getConnection method.
+ */
+ private class StatementHandler implements InvocationHandler {
+ private ConnectionHandler con;
+ private Statement st;
+
+ StatementHandler(ConnectionHandler con, Statement st) {
+ this.con = con;
+ this.st = st;
+ }
+
+ @Override
+ @SuppressWarnings("throwing.nullable")
+ public Object invoke(Object proxy, Method method, Object[] args)
+ throws Throwable {
+ final String methodName = method.getName();
+ // From Object
+ if (method.getDeclaringClass() == Object.class) {
+ if ("toString".equals(methodName)) {
+ return "Pooled statement wrapping physical statement " + st;
+ }
+ if ("hashCode".equals(methodName)) {
+ return System.identityHashCode(proxy);
+ }
+ if ("equals".equals(methodName)) {
+ return proxy == args[0];
+ }
+ return method.invoke(st, args);
+ }
+
+ Statement st = this.st;
+ // All the rest is from the Statement interface
+ if ("isClosed".equals(methodName)) {
+ return st == null || st.isClosed();
+ }
+ if ("close".equals(methodName)) {
+ if (st == null || st.isClosed()) {
+ return null;
+ }
+ con = null;
+ this.st = null;
+ st.close();
+ return null;
+ }
+ if (st == null || st.isClosed()) {
+ throw new PSQLException(GT.tr("Statement has been closed."), PSQLState.OBJECT_NOT_IN_STATE);
+ }
+ if ("getConnection".equals(methodName)) {
+ return con.getProxy(); // the proxied connection, not a physical connection
+ }
+
+ // Delegate the call to the proxied Statement.
+ try {
+ return method.invoke(st, args);
+ } catch (final InvocationTargetException ite) {
+ final Throwable te = ite.getTargetException();
+ if (te instanceof SQLException) {
+ fireConnectionError((SQLException) te); // Tell listeners about exception if it's fatal
+ }
+ throw te;
+ }
+ }
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/ds/PGPoolingDataSource.java b/pgjdbc/src/main/java/org/postgresql/ds/PGPoolingDataSource.java
index 4743bb9..14e0032 100644
--- a/pgjdbc/src/main/java/org/postgresql/ds/PGPoolingDataSource.java
+++ b/pgjdbc/src/main/java/org/postgresql/ds/PGPoolingDataSource.java
@@ -5,13 +5,6 @@
package org.postgresql.ds;
-import org.postgresql.ds.common.BaseDataSource;
-import org.postgresql.jdbc.ResourceLock;
-import org.postgresql.util.DriverInfo;
-import org.postgresql.util.GT;
-import org.postgresql.util.PSQLException;
-import org.postgresql.util.PSQLState;
-
import java.sql.Connection;
import java.sql.SQLException;
import java.util.Stack;
@@ -19,7 +12,6 @@ import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
-
import javax.naming.NamingException;
import javax.naming.Reference;
import javax.naming.StringRefAddr;
@@ -27,6 +19,12 @@ import javax.sql.ConnectionEvent;
import javax.sql.ConnectionEventListener;
import javax.sql.DataSource;
import javax.sql.PooledConnection;
+import org.postgresql.ds.common.BaseDataSource;
+import org.postgresql.jdbc.ResourceLock;
+import org.postgresql.util.DriverInfo;
+import org.postgresql.util.GT;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
/**
* DataSource which uses connection pooling. Don't use this if your
@@ -57,426 +55,423 @@ import javax.sql.PooledConnection;
*
*
* @author Aaron Mulder (ammulder@chariotsolutions.com)
- *
* @deprecated Since 42.0.0, instead of this class you should use a fully featured connection pool
- * like HikariCP, vibur-dbcp, commons-dbcp, c3p0, etc.
+ * like HikariCP, vibur-dbcp, commons-dbcp, c3p0, etc.
*/
@SuppressWarnings("try")
@Deprecated
public class PGPoolingDataSource extends BaseDataSource implements DataSource {
- protected static ConcurrentMap<String, PGPoolingDataSource> dataSources =
- new ConcurrentHashMap<>();
-
- public static PGPoolingDataSource getDataSource(String name) {
- return dataSources.get(name);
- }
-
- // Additional Data Source properties
- protected String dataSourceName; // Must be protected for subclasses to sync updates to it
- private int initialConnections;
- private int maxConnections;
- // State variables
- private boolean initialized;
- private final Stack<PooledConnection> available = new Stack<>();
- private final Stack<PooledConnection> used = new Stack<>();
- private boolean isClosed;
- private final ResourceLock lock = new ResourceLock();
- private final Condition lockCondition = lock.newCondition();
- private PGConnectionPoolDataSource source;
-
- /**
- * Gets a description of this DataSource.
- */
- @Override
- public String getDescription() {
- return "Pooling DataSource '" + dataSourceName + " from " + DriverInfo.DRIVER_FULL_NAME;
- }
-
- /**
- * Ensures the DataSource properties are not changed after the DataSource has been used.
- *
- * @throws IllegalStateException The Server Name cannot be changed after the DataSource has been
- * used.
- */
- @Override
- public void setServerName(String serverName) {
- if (initialized) {
- throw new IllegalStateException(
- "Cannot set Data Source properties after DataSource has been used");
- }
- super.setServerName(serverName);
- }
-
- /**
- * Ensures the DataSource properties are not changed after the DataSource has been used.
- *
- * @throws IllegalStateException The Database Name cannot be changed after the DataSource has been
- * used.
- */
- @Override
- public void setDatabaseName(String databaseName) {
- if (initialized) {
- throw new IllegalStateException(
- "Cannot set Data Source properties after DataSource has been used");
- }
- super.setDatabaseName(databaseName);
- }
-
- /**
- * Ensures the DataSource properties are not changed after the DataSource has been used.
- *
- * @throws IllegalStateException The User cannot be changed after the DataSource has been used.
- */
- @Override
- public void setUser(String user) {
- if (initialized) {
- throw new IllegalStateException(
- "Cannot set Data Source properties after DataSource has been used");
- }
- super.setUser(user);
- }
-
- /**
- * Ensures the DataSource properties are not changed after the DataSource has been used.
- *
- * @throws IllegalStateException The Password cannot be changed after the DataSource has been
- * used.
- */
- @Override
- public void setPassword(String password) {
- if (initialized) {
- throw new IllegalStateException(
- "Cannot set Data Source properties after DataSource has been used");
- }
- super.setPassword(password);
- }
-
- /**
- * Ensures the DataSource properties are not changed after the DataSource has been used.
- *
- * @throws IllegalStateException The Port Number cannot be changed after the DataSource has been
- * used.
- */
- @Override
- public void setPortNumber(int portNumber) {
- if (initialized) {
- throw new IllegalStateException(
- "Cannot set Data Source properties after DataSource has been used");
- }
- super.setPortNumber(portNumber);
- }
-
- /**
- * Gets the number of connections that will be created when this DataSource is initialized. If you
- * do not call initialize explicitly, it will be initialized the first time a connection is drawn
- * from it.
- *
- * @return number of connections that will be created when this DataSource is initialized
- */
- public int getInitialConnections() {
- return initialConnections;
- }
-
- /**
- * Sets the number of connections that will be created when this DataSource is initialized. If you
- * do not call initialize explicitly, it will be initialized the first time a connection is drawn
- * from it.
- *
- * @param initialConnections number of initial connections
- * @throws IllegalStateException The Initial Connections cannot be changed after the DataSource
- * has been used.
- */
- public void setInitialConnections(int initialConnections) {
- if (initialized) {
- throw new IllegalStateException(
- "Cannot set Data Source properties after DataSource has been used");
- }
- this.initialConnections = initialConnections;
- }
-
- /**
- * Gets the maximum number of connections that the pool will allow. If a request comes in and this
- * many connections are in use, the request will block until a connection is available. Note that
- * connections for a user other than the default user will not be pooled and don't count against
- * this limit.
- *
- * @return The maximum number of pooled connection allowed, or 0 for no maximum.
- */
- public int getMaxConnections() {
- return maxConnections;
- }
-
- /**
- * Sets the maximum number of connections that the pool will allow. If a request comes in and this
- * many connections are in use, the request will block until a connection is available. Note that
- * connections for a user other than the default user will not be pooled and don't count against
- * this limit.
- *
- * @param maxConnections The maximum number of pooled connection to allow, or 0 for no maximum.
- * @throws IllegalStateException The Maximum Connections cannot be changed after the DataSource
- * has been used.
- */
- public void setMaxConnections(int maxConnections) {
- if (initialized) {
- throw new IllegalStateException(
- "Cannot set Data Source properties after DataSource has been used");
- }
- this.maxConnections = maxConnections;
- }
-
- /**
- * Gets the name of this DataSource. This uniquely identifies the DataSource. You cannot use more
- * than one DataSource in the same VM with the same name.
- *
- * @return name of this DataSource
- */
- public String getDataSourceName() {
- return dataSourceName;
- }
-
- /**
- * Sets the name of this DataSource. This is required, and uniquely identifies the DataSource. You
- * cannot create or use more than one DataSource in the same VM with the same name.
- *
- * @param dataSourceName datasource name
- * @throws IllegalStateException The Data Source Name cannot be changed after the DataSource has
- * been used.
- * @throws IllegalArgumentException Another PoolingDataSource with the same dataSourceName already
- * exists.
- */
- public void setDataSourceName(String dataSourceName) {
- if (initialized) {
- throw new IllegalStateException(
- "Cannot set Data Source properties after DataSource has been used");
- }
- if (this.dataSourceName != null && dataSourceName != null
- && dataSourceName.equals(this.dataSourceName)) {
- return;
- }
- PGPoolingDataSource previous = dataSources.putIfAbsent(dataSourceName, this);
- if (previous != null) {
- throw new IllegalArgumentException(
- "DataSource with name '" + dataSourceName + "' already exists!");
- }
- if (this.dataSourceName != null) {
- dataSources.remove(this.dataSourceName);
- }
- this.dataSourceName = dataSourceName;
- }
-
- /**
- * Initializes this DataSource. If the initialConnections is greater than zero, that number of
- * connections will be created. After this method is called, the DataSource properties cannot be
- * changed. If you do not call this explicitly, it will be called the first time you get a
- * connection from the DataSource.
- *
- * @throws SQLException Occurs when the initialConnections is greater than zero, but the
- * DataSource is not able to create enough physical connections.
- */
- public void initialize() throws SQLException {
- try (ResourceLock ignore = lock.obtain()) {
- PGConnectionPoolDataSource source = createConnectionPool();
- this.source = source;
- try {
- source.initializeFrom(this);
- } catch (Exception e) {
- throw new PSQLException(GT.tr("Failed to setup DataSource."), PSQLState.UNEXPECTED_ERROR,
- e);
- }
-
- while (available.size() < initialConnections) {
- available.push(source.getPooledConnection());
- }
-
- initialized = true;
- }
- }
-
- protected boolean isInitialized() {
- return initialized;
- }
-
- /**
- * Creates the appropriate ConnectionPool to use for this DataSource.
- *
- * @return appropriate ConnectionPool to use for this DataSource
- */
- protected PGConnectionPoolDataSource createConnectionPool() {
- return new PGConnectionPoolDataSource();
- }
-
- /**
- * Gets a non-pooled connection, unless the user and password are the same as the default
- * values for this connection pool.
- *
- * @return A pooled connection.
- * @throws SQLException Occurs when no pooled connection is available, and a new physical
- * connection cannot be created.
- */
- @Override
- public Connection getConnection(String user, String password)
- throws SQLException {
- // If this is for the default user/password, use a pooled connection
- if (user == null || (user.equals(getUser()) && ((password == null && getPassword() == null)
- || (password != null && password.equals(getPassword()))))) {
- return getConnection();
- }
- // Otherwise, use a non-pooled connection
- if (!initialized) {
- initialize();
- }
- return super.getConnection(user, password);
- }
-
- /**
- * Gets a connection from the connection pool.
- *
- * @return A pooled connection.
- * @throws SQLException Occurs when no pooled connection is available, and a new physical
- * connection cannot be created.
- */
- @Override
- public Connection getConnection() throws SQLException {
- if (!initialized) {
- initialize();
- }
- return getPooledConnection();
- }
-
- /**
- * Closes this DataSource, and all the pooled connections, whether in use or not.
- */
- public void close() {
- try (ResourceLock ignore = lock.obtain()) {
- isClosed = true;
- while (!available.isEmpty()) {
- PooledConnection pci = available.pop();
- try {
- pci.close();
- } catch (SQLException ignored) {
+ protected static ConcurrentMap<String, PGPoolingDataSource> dataSources =
+ new ConcurrentHashMap<>();
+ private final Stack<PooledConnection> available = new Stack<>();
+ private final Stack<PooledConnection> used = new Stack<>();
+ private final ResourceLock lock = new ResourceLock();
+ private final Condition lockCondition = lock.newCondition();
+ // Additional Data Source properties
+ protected String dataSourceName; // Must be protected for subclasses to sync updates to it
+ private int initialConnections;
+ private int maxConnections;
+ // State variables
+ private boolean initialized;
+ private boolean isClosed;
+ /**
+ * Notified when a pooled connection is closed, or a fatal error occurs on a pooled connection.
+ * This is the only way connections are marked as unused.
+ */
+ private final ConnectionEventListener connectionEventListener = new ConnectionEventListener() {
+ @Override
+ public void connectionClosed(ConnectionEvent event) {
+ ((PooledConnection) event.getSource()).removeConnectionEventListener(this);
+ try (ResourceLock ignore = lock.obtain()) {
+ if (isClosed) {
+ return; // DataSource has been closed
+ }
+ boolean removed = used.remove(event.getSource());
+ if (removed) {
+ available.push((PooledConnection) event.getSource());
+ // There's now a new connection available
+ lockCondition.signal();
+ } else {
+ // a connection error occurred
+ }
+ }
}
- }
- while (!used.isEmpty()) {
- PooledConnection pci = used.pop();
- pci.removeConnectionEventListener(connectionEventListener);
- try {
- pci.close();
- } catch (SQLException ignored) {
- }
- }
- }
- removeStoredDataSource();
- }
- protected void removeStoredDataSource() {
- dataSources.remove(dataSourceName);
- }
+ /**
+ * This is only called for fatal errors, where the physical connection is useless afterward and
+ * should be removed from the pool.
+ */
+ @Override
+ public void connectionErrorOccurred(ConnectionEvent event) {
+ ((PooledConnection) event.getSource()).removeConnectionEventListener(this);
+ try (ResourceLock ignore = lock.obtain()) {
+ if (isClosed) {
+ return; // DataSource has been closed
+ }
+ used.remove(event.getSource());
+ // We're now at least 1 connection under the max
+ lockCondition.signal();
+ }
+ }
+ };
+ private PGConnectionPoolDataSource source;
- protected void addDataSource(String dataSourceName) {
- dataSources.put(dataSourceName, this);
- }
-
- /**
- * Gets a connection from the pool. Will get an available one if present, or create a new one if
- * under the max limit. Will block if all used and a new one would exceed the max.
- */
- private Connection getPooledConnection() throws SQLException {
- PooledConnection pc = null;
- try (ResourceLock ignore = lock.obtain()) {
- if (isClosed) {
- throw new PSQLException(GT.tr("DataSource has been closed."),
- PSQLState.CONNECTION_DOES_NOT_EXIST);
- }
- while (true) {
- if (!available.isEmpty()) {
- pc = available.pop();
- used.push(pc);
- break;
- }
- if (maxConnections == 0 || used.size() < maxConnections) {
- pc = source.getPooledConnection();
- used.push(pc);
- break;
- } else {
- try {
- // Wake up every second at a minimum
- lockCondition.await(1000L, TimeUnit.MILLISECONDS);
- } catch (InterruptedException ignored) {
- }
- }
- }
- }
- pc.addConnectionEventListener(connectionEventListener);
- return pc.getConnection();
- }
-
- /**
- * Notified when a pooled connection is closed, or a fatal error occurs on a pooled connection.
- * This is the only way connections are marked as unused.
- */
- private final ConnectionEventListener connectionEventListener = new ConnectionEventListener() {
- @Override
- public void connectionClosed(ConnectionEvent event) {
- ((PooledConnection) event.getSource()).removeConnectionEventListener(this);
- try (ResourceLock ignore = lock.obtain()) {
- if (isClosed) {
- return; // DataSource has been closed
- }
- boolean removed = used.remove(event.getSource());
- if (removed) {
- available.push((PooledConnection) event.getSource());
- // There's now a new connection available
- lockCondition.signal();
- } else {
- // a connection error occurred
- }
- }
+ public static PGPoolingDataSource getDataSource(String name) {
+ return dataSources.get(name);
}
/**
- * This is only called for fatal errors, where the physical connection is useless afterward and
- * should be removed from the pool.
+ * Gets a description of this DataSource.
*/
@Override
- public void connectionErrorOccurred(ConnectionEvent event) {
- ((PooledConnection) event.getSource()).removeConnectionEventListener(this);
- try (ResourceLock ignore = lock.obtain()) {
- if (isClosed) {
- return; // DataSource has been closed
+ public String getDescription() {
+ return "Pooling DataSource '" + dataSourceName + " from " + DriverInfo.DRIVER_FULL_NAME;
+ }
+
+ /**
+ * Ensures the DataSource properties are not changed after the DataSource has been used.
+ *
+ * @throws IllegalStateException The Server Name cannot be changed after the DataSource has been
+ * used.
+ */
+ @Override
+ public void setServerName(String serverName) {
+ if (initialized) {
+ throw new IllegalStateException(
+ "Cannot set Data Source properties after DataSource has been used");
}
- used.remove(event.getSource());
- // We're now at least 1 connection under the max
- lockCondition.signal();
- }
+ super.setServerName(serverName);
}
- };
- /**
- * Adds custom properties for this DataSource to the properties defined in the superclass.
- */
- @Override
- public Reference getReference() throws NamingException {
- Reference ref = super.getReference();
- ref.add(new StringRefAddr("dataSourceName", dataSourceName));
- if (initialConnections > 0) {
- ref.add(new StringRefAddr("initialConnections", Integer.toString(initialConnections)));
+ /**
+ * Ensures the DataSource properties are not changed after the DataSource has been used.
+ *
+ * @throws IllegalStateException The Database Name cannot be changed after the DataSource has been
+ * used.
+ */
+ @Override
+ public void setDatabaseName(String databaseName) {
+ if (initialized) {
+ throw new IllegalStateException(
+ "Cannot set Data Source properties after DataSource has been used");
+ }
+ super.setDatabaseName(databaseName);
}
- if (maxConnections > 0) {
- ref.add(new StringRefAddr("maxConnections", Integer.toString(maxConnections)));
- }
- return ref;
- }
- @Override
- public boolean isWrapperFor(Class<?> iface) throws SQLException {
- return iface.isAssignableFrom(getClass());
- }
-
- @Override
- public <T> T unwrap(Class<T> iface) throws SQLException {
- if (iface.isAssignableFrom(getClass())) {
- return iface.cast(this);
+ /**
+ * Ensures the DataSource properties are not changed after the DataSource has been used.
+ *
+ * @throws IllegalStateException The User cannot be changed after the DataSource has been used.
+ */
+ @Override
+ public void setUser(String user) {
+ if (initialized) {
+ throw new IllegalStateException(
+ "Cannot set Data Source properties after DataSource has been used");
+ }
+ super.setUser(user);
+ }
+
+ /**
+ * Ensures the DataSource properties are not changed after the DataSource has been used.
+ *
+ * @throws IllegalStateException The Password cannot be changed after the DataSource has been
+ * used.
+ */
+ @Override
+ public void setPassword(String password) {
+ if (initialized) {
+ throw new IllegalStateException(
+ "Cannot set Data Source properties after DataSource has been used");
+ }
+ super.setPassword(password);
+ }
+
+ /**
+ * Ensures the DataSource properties are not changed after the DataSource has been used.
+ *
+ * @throws IllegalStateException The Port Number cannot be changed after the DataSource has been
+ * used.
+ */
+ @Override
+ public void setPortNumber(int portNumber) {
+ if (initialized) {
+ throw new IllegalStateException(
+ "Cannot set Data Source properties after DataSource has been used");
+ }
+ super.setPortNumber(portNumber);
+ }
+
+ /**
+ * Gets the number of connections that will be created when this DataSource is initialized. If you
+ * do not call initialize explicitly, it will be initialized the first time a connection is drawn
+ * from it.
+ *
+ * @return number of connections that will be created when this DataSource is initialized
+ */
+ public int getInitialConnections() {
+ return initialConnections;
+ }
+
+ /**
+ * Sets the number of connections that will be created when this DataSource is initialized. If you
+ * do not call initialize explicitly, it will be initialized the first time a connection is drawn
+ * from it.
+ *
+ * @param initialConnections number of initial connections
+ * @throws IllegalStateException The Initial Connections cannot be changed after the DataSource
+ * has been used.
+ */
+ public void setInitialConnections(int initialConnections) {
+ if (initialized) {
+ throw new IllegalStateException(
+ "Cannot set Data Source properties after DataSource has been used");
+ }
+ this.initialConnections = initialConnections;
+ }
+
+ /**
+ * Gets the maximum number of connections that the pool will allow. If a request comes in and this
+ * many connections are in use, the request will block until a connection is available. Note that
+ * connections for a user other than the default user will not be pooled and don't count against
+ * this limit.
+ *
+ * @return The maximum number of pooled connection allowed, or 0 for no maximum.
+ */
+ public int getMaxConnections() {
+ return maxConnections;
+ }
+
+ /**
+ * Sets the maximum number of connections that the pool will allow. If a request comes in and this
+ * many connections are in use, the request will block until a connection is available. Note that
+ * connections for a user other than the default user will not be pooled and don't count against
+ * this limit.
+ *
+ * @param maxConnections The maximum number of pooled connection to allow, or 0 for no maximum.
+ * @throws IllegalStateException The Maximum Connections cannot be changed after the DataSource
+ * has been used.
+ */
+ public void setMaxConnections(int maxConnections) {
+ if (initialized) {
+ throw new IllegalStateException(
+ "Cannot set Data Source properties after DataSource has been used");
+ }
+ this.maxConnections = maxConnections;
+ }
+
+ /**
+ * Gets the name of this DataSource. This uniquely identifies the DataSource. You cannot use more
+ * than one DataSource in the same VM with the same name.
+ *
+ * @return name of this DataSource
+ */
+ public String getDataSourceName() {
+ return dataSourceName;
+ }
+
+ /**
+ * Sets the name of this DataSource. This is required, and uniquely identifies the DataSource. You
+ * cannot create or use more than one DataSource in the same VM with the same name.
+ *
+ * @param dataSourceName datasource name
+ * @throws IllegalStateException The Data Source Name cannot be changed after the DataSource has
+ * been used.
+ * @throws IllegalArgumentException Another PoolingDataSource with the same dataSourceName already
+ * exists.
+ */
+ public void setDataSourceName(String dataSourceName) {
+ if (initialized) {
+ throw new IllegalStateException(
+ "Cannot set Data Source properties after DataSource has been used");
+ }
+ if (this.dataSourceName != null && dataSourceName != null
+ && dataSourceName.equals(this.dataSourceName)) {
+ return;
+ }
+ PGPoolingDataSource previous = dataSources.putIfAbsent(dataSourceName, this);
+ if (previous != null) {
+ throw new IllegalArgumentException(
+ "DataSource with name '" + dataSourceName + "' already exists!");
+ }
+ if (this.dataSourceName != null) {
+ dataSources.remove(this.dataSourceName);
+ }
+ this.dataSourceName = dataSourceName;
+ }
+
+ /**
+ * Initializes this DataSource. If the initialConnections is greater than zero, that number of
+ * connections will be created. After this method is called, the DataSource properties cannot be
+ * changed. If you do not call this explicitly, it will be called the first time you get a
+ * connection from the DataSource.
+ *
+ * @throws SQLException Occurs when the initialConnections is greater than zero, but the
+ * DataSource is not able to create enough physical connections.
+ */
+ public void initialize() throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ PGConnectionPoolDataSource source = createConnectionPool();
+ this.source = source;
+ try {
+ source.initializeFrom(this);
+ } catch (Exception e) {
+ throw new PSQLException(GT.tr("Failed to setup DataSource."), PSQLState.UNEXPECTED_ERROR,
+ e);
+ }
+
+ while (available.size() < initialConnections) {
+ available.push(source.getPooledConnection());
+ }
+
+ initialized = true;
+ }
+ }
+
+ protected boolean isInitialized() {
+ return initialized;
+ }
+
+ /**
+ * Creates the appropriate ConnectionPool to use for this DataSource.
+ *
+ * @return appropriate ConnectionPool to use for this DataSource
+ */
+ protected PGConnectionPoolDataSource createConnectionPool() {
+ return new PGConnectionPoolDataSource();
+ }
+
+ /**
+ * Gets a non-pooled connection, unless the user and password are the same as the default
+ * values for this connection pool.
+ *
+ * @return A pooled connection.
+ * @throws SQLException Occurs when no pooled connection is available, and a new physical
+ * connection cannot be created.
+ */
+ @Override
+ public Connection getConnection(String user, String password)
+ throws SQLException {
+ // If this is for the default user/password, use a pooled connection
+ if (user == null || (user.equals(getUser()) && ((password == null && getPassword() == null)
+ || (password != null && password.equals(getPassword()))))) {
+ return getConnection();
+ }
+ // Otherwise, use a non-pooled connection
+ if (!initialized) {
+ initialize();
+ }
+ return super.getConnection(user, password);
+ }
+
+ /**
+ * Gets a connection from the connection pool.
+ *
+ * @return A pooled connection.
+ * @throws SQLException Occurs when no pooled connection is available, and a new physical
+ * connection cannot be created.
+ */
+ @Override
+ public Connection getConnection() throws SQLException {
+ if (!initialized) {
+ initialize();
+ }
+ return getPooledConnection();
+ }
+
+ /**
+ * Closes this DataSource, and all the pooled connections, whether in use or not.
+ */
+ public void close() {
+ try (ResourceLock ignore = lock.obtain()) {
+ isClosed = true;
+ while (!available.isEmpty()) {
+ PooledConnection pci = available.pop();
+ try {
+ pci.close();
+ } catch (SQLException ignored) {
+ }
+ }
+ while (!used.isEmpty()) {
+ PooledConnection pci = used.pop();
+ pci.removeConnectionEventListener(connectionEventListener);
+ try {
+ pci.close();
+ } catch (SQLException ignored) {
+ }
+ }
+ }
+ removeStoredDataSource();
+ }
+
+ protected void removeStoredDataSource() {
+ dataSources.remove(dataSourceName);
+ }
+
+ protected void addDataSource(String dataSourceName) {
+ dataSources.put(dataSourceName, this);
+ }
+
+ /**
+ * Gets a connection from the pool. Will get an available one if present, or create a new one if
+ * under the max limit. Will block if all used and a new one would exceed the max.
+ */
+ private Connection getPooledConnection() throws SQLException {
+ PooledConnection pc = null;
+ try (ResourceLock ignore = lock.obtain()) {
+ if (isClosed) {
+ throw new PSQLException(GT.tr("DataSource has been closed."),
+ PSQLState.CONNECTION_DOES_NOT_EXIST);
+ }
+ while (true) {
+ if (!available.isEmpty()) {
+ pc = available.pop();
+ used.push(pc);
+ break;
+ }
+ if (maxConnections == 0 || used.size() < maxConnections) {
+ pc = source.getPooledConnection();
+ used.push(pc);
+ break;
+ } else {
+ try {
+ // Wake up every second at a minimum
+ lockCondition.await(1000L, TimeUnit.MILLISECONDS);
+ } catch (InterruptedException ignored) {
+ }
+ }
+ }
+ }
+ pc.addConnectionEventListener(connectionEventListener);
+ return pc.getConnection();
+ }
+
+ /**
+ * Adds custom properties for this DataSource to the properties defined in the superclass.
+ */
+ @Override
+ public Reference getReference() throws NamingException {
+ Reference ref = super.getReference();
+ ref.add(new StringRefAddr("dataSourceName", dataSourceName));
+ if (initialConnections > 0) {
+ ref.add(new StringRefAddr("initialConnections", Integer.toString(initialConnections)));
+ }
+ if (maxConnections > 0) {
+ ref.add(new StringRefAddr("maxConnections", Integer.toString(maxConnections)));
+ }
+ return ref;
+ }
+
+ @Override
+ public boolean isWrapperFor(Class<?> iface) throws SQLException {
+ return iface.isAssignableFrom(getClass());
+ }
+
+ @Override
+ public <T> T unwrap(Class<T> iface) throws SQLException {
+ if (iface.isAssignableFrom(getClass())) {
+ return iface.cast(this);
+ }
+ throw new SQLException("Cannot unwrap to " + iface.getName());
}
- throw new SQLException("Cannot unwrap to " + iface.getName());
- }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/ds/PGSimpleDataSource.java b/pgjdbc/src/main/java/org/postgresql/ds/PGSimpleDataSource.java
index f3865dc..3911108 100644
--- a/pgjdbc/src/main/java/org/postgresql/ds/PGSimpleDataSource.java
+++ b/pgjdbc/src/main/java/org/postgresql/ds/PGSimpleDataSource.java
@@ -5,16 +5,14 @@
package org.postgresql.ds;
-import org.postgresql.ds.common.BaseDataSource;
-import org.postgresql.util.DriverInfo;
-
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.sql.SQLException;
-
import javax.sql.DataSource;
+import org.postgresql.ds.common.BaseDataSource;
+import org.postgresql.util.DriverInfo;
/**
* Simple DataSource which does not perform connection pooling. In order to use the DataSource, you
@@ -25,32 +23,32 @@ import javax.sql.DataSource;
*/
@SuppressWarnings("serial")
public class PGSimpleDataSource extends BaseDataSource implements DataSource, Serializable {
- /**
- * Gets a description of this DataSource.
- */
- @Override
- public String getDescription() {
- return "Non-Pooling DataSource from " + DriverInfo.DRIVER_FULL_NAME;
- }
-
- private void writeObject(ObjectOutputStream out) throws IOException {
- writeBaseObject(out);
- }
-
- private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
- readBaseObject(in);
- }
-
- @Override
- public boolean isWrapperFor(Class<?> iface) throws SQLException {
- return iface.isAssignableFrom(getClass());
- }
-
- @Override
- public <T> T unwrap(Class<T> iface) throws SQLException {
- if (iface.isAssignableFrom(getClass())) {
- return iface.cast(this);
+ /**
+ * Gets a description of this DataSource.
+ */
+ @Override
+ public String getDescription() {
+ return "Non-Pooling DataSource from " + DriverInfo.DRIVER_FULL_NAME;
+ }
+
+ private void writeObject(ObjectOutputStream out) throws IOException {
+ writeBaseObject(out);
+ }
+
+ private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
+ readBaseObject(in);
+ }
+
+ @Override
+ public boolean isWrapperFor(Class<?> iface) throws SQLException {
+ return iface.isAssignableFrom(getClass());
+ }
+
+ @Override
+ public <T> T unwrap(Class<T> iface) throws SQLException {
+ if (iface.isAssignableFrom(getClass())) {
+ return iface.cast(this);
+ }
+ throw new SQLException("Cannot unwrap to " + iface.getName());
}
- throw new SQLException("Cannot unwrap to " + iface.getName());
- }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/ds/common/BaseDataSource.java b/pgjdbc/src/main/java/org/postgresql/ds/common/BaseDataSource.java
index 612c26e..cf39f88 100644
--- a/pgjdbc/src/main/java/org/postgresql/ds/common/BaseDataSource.java
+++ b/pgjdbc/src/main/java/org/postgresql/ds/common/BaseDataSource.java
@@ -5,16 +5,6 @@
package org.postgresql.ds.common;
-import org.postgresql.Driver;
-import org.postgresql.PGProperty;
-import org.postgresql.jdbc.AutoSave;
-import org.postgresql.jdbc.PreferQueryMode;
-import org.postgresql.util.ExpressionProperties;
-import org.postgresql.util.GT;
-import org.postgresql.util.PSQLException;
-import org.postgresql.util.PSQLState;
-import org.postgresql.util.URLCoder;
-
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
@@ -28,13 +18,21 @@ import java.util.Arrays;
import java.util.Properties;
import java.util.logging.Level;
import java.util.logging.Logger;
-
import javax.naming.NamingException;
import javax.naming.RefAddr;
import javax.naming.Reference;
import javax.naming.Referenceable;
import javax.naming.StringRefAddr;
import javax.sql.CommonDataSource;
+import org.postgresql.Driver;
+import org.postgresql.PGProperty;
+import org.postgresql.jdbc.AutoSave;
+import org.postgresql.jdbc.PreferQueryMode;
+import org.postgresql.util.ExpressionProperties;
+import org.postgresql.util.GT;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
+import org.postgresql.util.URLCoder;
/**
* Base class for data sources and related classes.
@@ -43,1806 +41,1800 @@ import javax.sql.CommonDataSource;
*/
public abstract class BaseDataSource implements CommonDataSource, Referenceable {
- private static final Logger LOGGER = Logger.getLogger(BaseDataSource.class.getName());
-
- // Standard properties, defined in the JDBC 2.0 Optional Package spec
- private String[] serverNames = new String[]{"localhost"};
- private String databaseName = "";
- private String user;
- private String password;
- private int[] portNumbers = new int[]{0};
-
- // Map for all other properties
- private Properties properties = new Properties();
-
- /*
- * Ensure the driver is loaded as JDBC Driver might be invisible to Java's ServiceLoader.
- * Usually, {@code Class.forName(...)} is not required as {@link DriverManager} detects JDBC drivers
- * via {@code META-INF/services/java.sql.Driver} entries. However there might be cases when the driver
- * is located at the application level classloader, thus it might be required to perform manual
- * registration of the driver.
- */
- static {
- try {
- Class.forName("org.postgresql.Driver");
- } catch (ClassNotFoundException e) {
- throw new IllegalStateException(
- "BaseDataSource is unable to load org.postgresql.Driver. Please check if you have proper PostgreSQL JDBC Driver jar on the classpath",
- e);
- }
- }
-
- /**
- * Gets a connection to the PostgreSQL database. The database is identified by the DataSource
- * properties serverName, databaseName, and portNumber. The user to connect as is identified by
- * the DataSource properties user and password.
- *
- * @return A valid database connection.
- * @throws SQLException Occurs when the database connection cannot be established.
- */
- public Connection getConnection() throws SQLException {
- return getConnection(user, password);
- }
-
- /**
- * Gets a connection to the PostgreSQL database. The database is identified by the DataSource
- * properties serverName, databaseName, and portNumber. The user to connect as is identified by
- * the arguments user and password, which override the DataSource properties by the same name.
- *
- * @param user user
- * @param password password
- * @return A valid database connection.
- * @throws SQLException Occurs when the database connection cannot be established.
- */
- public Connection getConnection(String user, String password)
- throws SQLException {
- try {
- Connection con = DriverManager.getConnection(getUrl(), user, password);
- if (LOGGER.isLoggable(Level.FINE)) {
- LOGGER.log(Level.FINE, "Created a {0} for {1} at {2}",
- new Object[]{getDescription(), user, getUrl()});
- }
- return con;
- } catch (SQLException e) {
- LOGGER.log(Level.FINE, "Failed to create a {0} for {1} at {2}: {3}",
- new Object[]{getDescription(), user, getUrl(), e});
- throw e;
- }
- }
-
- /**
- * This implementation don't use a LogWriter.
- */
- @Override
- public PrintWriter getLogWriter() {
- return null;
- }
-
- /**
- * This implementation don't use a LogWriter.
- *
- * @param printWriter Not used
- */
- @Override
- public void setLogWriter(PrintWriter printWriter) {
- // NOOP
- }
-
- /**
- * Gets the name of the host the PostgreSQL database is running on.
- *
- * @return name of the host the PostgreSQL database is running on
- * @deprecated use {@link #getServerNames()}
- */
- @Deprecated
- public String getServerName() {
- return serverNames[0];
- }
-
- /**
- * Gets the name of the host(s) the PostgreSQL database is running on.
- *
- * @return name of the host(s) the PostgreSQL database is running on
- */
- public String[] getServerNames() {
- return serverNames;
- }
-
- /**
- * Sets the name of the host the PostgreSQL database is running on. If this is changed, it will
- * only affect future calls to getConnection. The default value is {@code localhost}.
- *
- * @param serverName name of the host the PostgreSQL database is running on
- * @deprecated use {@link #setServerNames(String[])}
- */
- @Deprecated
- public void setServerName(String serverName) {
- this.setServerNames(new String[]{serverName});
- }
-
- /**
- * Sets the name of the host(s) the PostgreSQL database is running on. If this is changed, it will
- * only affect future calls to getConnection. The default value is {@code localhost}.
- *
- * @param serverNames name of the host(s) the PostgreSQL database is running on
- */
- @SuppressWarnings("nullness")
- public void setServerNames(String [] serverNames) {
- if (serverNames == null || serverNames.length == 0) {
- this.serverNames = new String[]{"localhost"};
- } else {
- serverNames = serverNames.clone();
- for (int i = 0; i < serverNames.length; i++) {
- String serverName = serverNames[i];
- if (serverName == null || "".equals(serverName)) {
- serverNames[i] = "localhost";
- }
- }
- this.serverNames = serverNames;
- }
- }
-
- /**
- * Gets the name of the PostgreSQL database, running on the server identified by the serverName
- * property.
- *
- * @return name of the PostgreSQL database
- */
- public String getDatabaseName() {
- return databaseName;
- }
-
- /**
- * Sets the name of the PostgreSQL database, running on the server identified by the serverName
- * property. If this is changed, it will only affect future calls to getConnection.
- *
- * @param databaseName name of the PostgreSQL database
- */
- public void setDatabaseName(String databaseName) {
- this.databaseName = databaseName;
- }
-
- /**
- * Gets a description of this DataSource-ish thing. Must be customized by subclasses.
- *
- * @return description of this DataSource-ish thing
- */
- public abstract String getDescription();
-
- /**
- * Gets the user to connect as by default. If this is not specified, you must use the
- * getConnection method which takes a user and password as parameters.
- *
- * @return user to connect as by default
- */
- public String getUser() {
- return user;
- }
-
- /**
- * Sets the user to connect as by default. If this is not specified, you must use the
- * getConnection method which takes a user and password as parameters. If this is changed, it will
- * only affect future calls to getConnection.
- *
- * @param user user to connect as by default
- */
- public void setUser(String user) {
- this.user = user;
- }
-
- /**
- * Gets the password to connect with by default. If this is not specified but a password is needed
- * to log in, you must use the getConnection method which takes a user and password as parameters.
- *
- * @return password to connect with by default
- */
- public String getPassword() {
- return password;
- }
-
- /**
- * Sets the password to connect with by default. If this is not specified but a password is needed
- * to log in, you must use the getConnection method which takes a user and password as parameters.
- * If this is changed, it will only affect future calls to getConnection.
- *
- * @param password password to connect with by default
- */
- public void setPassword(String password) {
- this.password = password;
- }
-
- /**
- * Gets the port which the PostgreSQL server is listening on for TCP/IP connections.
- *
- * @return The port, or 0 if the default port will be used.
- * @deprecated use {@link #getPortNumbers()}
- */
- @Deprecated
- public int getPortNumber() {
- if (portNumbers == null || portNumbers.length == 0) {
- return 0;
- }
- return portNumbers[0];
- }
-
- /**
- * Gets the port(s) which the PostgreSQL server is listening on for TCP/IP connections.
- *
- * @return The port(s), or 0 if the default port will be used.
- */
- public int[] getPortNumbers() {
- return portNumbers;
- }
-
- /**
- * Sets the port which the PostgreSQL server is listening on for TCP/IP connections. Be sure the
- * -i flag is passed to postmaster when PostgreSQL is started. If this is not set, or set to 0,
- * the default port will be used.
- *
- * @param portNumber port which the PostgreSQL server is listening on for TCP/IP
- * @deprecated use {@link #setPortNumbers(int[])}
- */
- @Deprecated
- public void setPortNumber(int portNumber) {
- setPortNumbers(new int[]{portNumber});
- }
-
- /**
- * Sets the port(s) which the PostgreSQL server is listening on for TCP/IP connections. Be sure the
- * -i flag is passed to postmaster when PostgreSQL is started. If this is not set, or set to 0,
- * the default port will be used.
- *
- * @param portNumbers port(s) which the PostgreSQL server is listening on for TCP/IP
- */
- public void setPortNumbers(int [] portNumbers) {
- if (portNumbers == null || portNumbers.length == 0) {
- portNumbers = new int[]{0};
- }
- this.portNumbers = Arrays.copyOf(portNumbers, portNumbers.length);
- }
-
- /**
- * @return command line options for this connection
- */
- public String getOptions() {
- return PGProperty.OPTIONS.getOrDefault(properties);
- }
-
- /**
- * Set command line options for this connection
- *
- * @param options string to set options to
- */
- public void setOptions(String options) {
- PGProperty.OPTIONS.set(properties, options);
- }
-
- /**
- * @return login timeout
- * @see PGProperty#LOGIN_TIMEOUT
- */
- @Override
- public int getLoginTimeout() {
- return PGProperty.LOGIN_TIMEOUT.getIntNoCheck(properties);
- }
-
- /**
- * @param loginTimeout login timeout
- * @see PGProperty#LOGIN_TIMEOUT
- */
- @Override
- public void setLoginTimeout(int loginTimeout) {
- PGProperty.LOGIN_TIMEOUT.set(properties, loginTimeout);
- }
-
- /**
- * @return connect timeout
- * @see PGProperty#CONNECT_TIMEOUT
- */
- public int getConnectTimeout() {
- return PGProperty.CONNECT_TIMEOUT.getIntNoCheck(properties);
- }
-
- /**
- * @param connectTimeout connect timeout
- * @see PGProperty#CONNECT_TIMEOUT
- */
- public void setConnectTimeout(int connectTimeout) {
- PGProperty.CONNECT_TIMEOUT.set(properties, connectTimeout);
- }
-
- /**
- *
- * @return GSS ResponseTimeout
- * @see PGProperty#GSS_RESPONSE_TIMEOUT
- */
- public int getGssResponseTimeout() {
- return PGProperty.GSS_RESPONSE_TIMEOUT.getIntNoCheck(properties);
- }
-
- /**
- *
- * @param gssResponseTimeout gss response timeout
- * @see PGProperty#GSS_RESPONSE_TIMEOUT
- */
- public void setGssResponseTimeout(int gssResponseTimeout) {
- PGProperty.GSS_RESPONSE_TIMEOUT.set(properties, gssResponseTimeout);
- }
-
- /**
- *
- * @return SSL ResponseTimeout
- * @see PGProperty#SSL_RESPONSE_TIMEOUT
- */
- public int getSslResponseTimeout() {
- return PGProperty.SSL_RESPONSE_TIMEOUT.getIntNoCheck(properties);
- }
-
- /**
- *
- * @param sslResponseTimeout ssl response timeout
- * @see PGProperty#SSL_RESPONSE_TIMEOUT
- */
- public void setSslResponseTimeout(int sslResponseTimeout) {
- PGProperty.SSL_RESPONSE_TIMEOUT.set(properties, sslResponseTimeout);
- }
-
- /**
- * @return protocol version
- * @see PGProperty#PROTOCOL_VERSION
- */
- public int getProtocolVersion() {
- if (!PGProperty.PROTOCOL_VERSION.isPresent(properties)) {
- return 0;
- } else {
- return PGProperty.PROTOCOL_VERSION.getIntNoCheck(properties);
- }
- }
-
- /**
- * @param protocolVersion protocol version
- * @see PGProperty#PROTOCOL_VERSION
- */
- public void setProtocolVersion(int protocolVersion) {
- if (protocolVersion == 0) {
- PGProperty.PROTOCOL_VERSION.set(properties, null);
- } else {
- PGProperty.PROTOCOL_VERSION.set(properties, protocolVersion);
- }
- }
-
- /**
- * @return quoteReturningIdentifiers
- * @see PGProperty#QUOTE_RETURNING_IDENTIFIERS
- */
- public boolean getQuoteReturningIdentifiers() {
- return PGProperty.QUOTE_RETURNING_IDENTIFIERS.getBoolean(properties);
- }
-
- /**
- * @param quoteIdentifiers indicate whether to quote identifiers
- * @see PGProperty#QUOTE_RETURNING_IDENTIFIERS
- */
- public void setQuoteReturningIdentifiers(boolean quoteIdentifiers) {
- PGProperty.QUOTE_RETURNING_IDENTIFIERS.set(properties, quoteIdentifiers);
- }
-
- /**
- * @return receive buffer size
- * @see PGProperty#RECEIVE_BUFFER_SIZE
- */
- public int getReceiveBufferSize() {
- return PGProperty.RECEIVE_BUFFER_SIZE.getIntNoCheck(properties);
- }
-
- /**
- * @param nbytes receive buffer size
- * @see PGProperty#RECEIVE_BUFFER_SIZE
- */
- public void setReceiveBufferSize(int nbytes) {
- PGProperty.RECEIVE_BUFFER_SIZE.set(properties, nbytes);
- }
-
- /**
- * @return send buffer size
- * @see PGProperty#SEND_BUFFER_SIZE
- */
- public int getSendBufferSize() {
- return PGProperty.SEND_BUFFER_SIZE.getIntNoCheck(properties);
- }
-
- /**
- * @param nbytes send buffer size
- * @see PGProperty#SEND_BUFFER_SIZE
- */
- public void setSendBufferSize(int nbytes) {
- PGProperty.SEND_BUFFER_SIZE.set(properties, nbytes);
- }
-
- /**
- * @param count prepare threshold
- * @see PGProperty#PREPARE_THRESHOLD
- */
- public void setPrepareThreshold(int count) {
- PGProperty.PREPARE_THRESHOLD.set(properties, count);
- }
-
- /**
- * @return prepare threshold
- * @see PGProperty#PREPARE_THRESHOLD
- */
- public int getPrepareThreshold() {
- return PGProperty.PREPARE_THRESHOLD.getIntNoCheck(properties);
- }
-
- /**
- * @return prepared statement cache size (number of statements per connection)
- * @see PGProperty#PREPARED_STATEMENT_CACHE_QUERIES
- */
- public int getPreparedStatementCacheQueries() {
- return PGProperty.PREPARED_STATEMENT_CACHE_QUERIES.getIntNoCheck(properties);
- }
-
- /**
- * @param cacheSize prepared statement cache size (number of statements per connection)
- * @see PGProperty#PREPARED_STATEMENT_CACHE_QUERIES
- */
- public void setPreparedStatementCacheQueries(int cacheSize) {
- PGProperty.PREPARED_STATEMENT_CACHE_QUERIES.set(properties, cacheSize);
- }
-
- /**
- * @return prepared statement cache size (number of megabytes per connection)
- * @see PGProperty#PREPARED_STATEMENT_CACHE_SIZE_MIB
- */
- public int getPreparedStatementCacheSizeMiB() {
- return PGProperty.PREPARED_STATEMENT_CACHE_SIZE_MIB.getIntNoCheck(properties);
- }
-
- /**
- * @param cacheSize statement cache size (number of megabytes per connection)
- * @see PGProperty#PREPARED_STATEMENT_CACHE_SIZE_MIB
- */
- public void setPreparedStatementCacheSizeMiB(int cacheSize) {
- PGProperty.PREPARED_STATEMENT_CACHE_SIZE_MIB.set(properties, cacheSize);
- }
-
- /**
- * @return database metadata cache fields size (number of fields cached per connection)
- * @see PGProperty#DATABASE_METADATA_CACHE_FIELDS
- */
- public int getDatabaseMetadataCacheFields() {
- return PGProperty.DATABASE_METADATA_CACHE_FIELDS.getIntNoCheck(properties);
- }
-
- /**
- * @param cacheSize database metadata cache fields size (number of fields cached per connection)
- * @see PGProperty#DATABASE_METADATA_CACHE_FIELDS
- */
- public void setDatabaseMetadataCacheFields(int cacheSize) {
- PGProperty.DATABASE_METADATA_CACHE_FIELDS.set(properties, cacheSize);
- }
-
- /**
- * @return database metadata cache fields size (number of megabytes per connection)
- * @see PGProperty#DATABASE_METADATA_CACHE_FIELDS_MIB
- */
- public int getDatabaseMetadataCacheFieldsMiB() {
- return PGProperty.DATABASE_METADATA_CACHE_FIELDS_MIB.getIntNoCheck(properties);
- }
-
- /**
- * @param cacheSize database metadata cache fields size (number of megabytes per connection)
- * @see PGProperty#DATABASE_METADATA_CACHE_FIELDS_MIB
- */
- public void setDatabaseMetadataCacheFieldsMiB(int cacheSize) {
- PGProperty.DATABASE_METADATA_CACHE_FIELDS_MIB.set(properties, cacheSize);
- }
-
- /**
- * @param fetchSize default fetch size
- * @see PGProperty#DEFAULT_ROW_FETCH_SIZE
- */
- public void setDefaultRowFetchSize(int fetchSize) {
- PGProperty.DEFAULT_ROW_FETCH_SIZE.set(properties, fetchSize);
- }
-
- /**
- * @return default fetch size
- * @see PGProperty#DEFAULT_ROW_FETCH_SIZE
- */
- public int getDefaultRowFetchSize() {
- return PGProperty.DEFAULT_ROW_FETCH_SIZE.getIntNoCheck(properties);
- }
-
- /**
- * @param unknownLength unknown length
- * @see PGProperty#UNKNOWN_LENGTH
- */
- public void setUnknownLength(int unknownLength) {
- PGProperty.UNKNOWN_LENGTH.set(properties, unknownLength);
- }
-
- /**
- * @return unknown length
- * @see PGProperty#UNKNOWN_LENGTH
- */
- public int getUnknownLength() {
- return PGProperty.UNKNOWN_LENGTH.getIntNoCheck(properties);
- }
-
- /**
- * @param seconds socket timeout
- * @see PGProperty#SOCKET_TIMEOUT
- */
- public void setSocketTimeout(int seconds) {
- PGProperty.SOCKET_TIMEOUT.set(properties, seconds);
- }
-
- /**
- * @return socket timeout
- * @see PGProperty#SOCKET_TIMEOUT
- */
- public int getSocketTimeout() {
- return PGProperty.SOCKET_TIMEOUT.getIntNoCheck(properties);
- }
-
- /**
- * @param seconds timeout that is used for sending cancel command
- * @see PGProperty#CANCEL_SIGNAL_TIMEOUT
- */
- public void setCancelSignalTimeout(int seconds) {
- PGProperty.CANCEL_SIGNAL_TIMEOUT.set(properties, seconds);
- }
-
- /**
- * @return timeout that is used for sending cancel command in seconds
- * @see PGProperty#CANCEL_SIGNAL_TIMEOUT
- */
- public int getCancelSignalTimeout() {
- return PGProperty.CANCEL_SIGNAL_TIMEOUT.getIntNoCheck(properties);
- }
-
- /**
- * @param enabled if SSL is enabled
- * @see PGProperty#SSL
- */
- public void setSsl(boolean enabled) {
- if (enabled) {
- PGProperty.SSL.set(properties, true);
- } else {
- PGProperty.SSL.set(properties, false);
- }
- }
-
- /**
- * @return true if SSL is enabled
- * @see PGProperty#SSL
- */
- public boolean getSsl() {
- // "true" if "ssl" is set but empty
- return PGProperty.SSL.getBoolean(properties) || "".equals(PGProperty.SSL.getOrDefault(properties));
- }
-
- /**
- * @param classname SSL factory class name
- * @see PGProperty#SSL_FACTORY
- */
- public void setSslfactory(String classname) {
- PGProperty.SSL_FACTORY.set(properties, classname);
- }
-
- /**
- * @return SSL factory class name
- * @see PGProperty#SSL_FACTORY
- */
- public String getSslfactory() {
- return PGProperty.SSL_FACTORY.getOrDefault(properties);
- }
-
- /**
- * @return SSL mode
- * @see PGProperty#SSL_MODE
- */
- public String getSslMode() {
- return PGProperty.SSL_MODE.getOrDefault(properties);
- }
-
- /**
- * @param mode SSL mode
- * @see PGProperty#SSL_MODE
- */
- public void setSslMode(String mode) {
- PGProperty.SSL_MODE.set(properties, mode);
- }
-
- /**
- * @return SSL mode
- * @see PGProperty#SSL_FACTORY_ARG
- */
- @SuppressWarnings("deprecation")
- public String getSslFactoryArg() {
- return PGProperty.SSL_FACTORY_ARG.getOrDefault(properties);
- }
-
- /**
- * @param arg argument forwarded to SSL factory
- * @see PGProperty#SSL_FACTORY_ARG
- */
- @SuppressWarnings("deprecation")
- public void setSslFactoryArg(String arg) {
- PGProperty.SSL_FACTORY_ARG.set(properties, arg);
- }
-
- /**
- * @return argument forwarded to SSL factory
- * @see PGProperty#SSL_HOSTNAME_VERIFIER
- */
- public String getSslHostnameVerifier() {
- return PGProperty.SSL_HOSTNAME_VERIFIER.getOrDefault(properties);
- }
-
- /**
- * @param className SSL hostname verifier
- * @see PGProperty#SSL_HOSTNAME_VERIFIER
- */
- public void setSslHostnameVerifier(String className) {
- PGProperty.SSL_HOSTNAME_VERIFIER.set(properties, className);
- }
-
- /**
- * @return className SSL hostname verifier
- * @see PGProperty#SSL_CERT
- */
- public String getSslCert() {
- return PGProperty.SSL_CERT.getOrDefault(properties);
- }
-
- /**
- * @param file SSL certificate
- * @see PGProperty#SSL_CERT
- */
- public void setSslCert(String file) {
- PGProperty.SSL_CERT.set(properties, file);
- }
-
- /**
- * @return SSL certificate
- * @see PGProperty#SSL_KEY
- */
- public String getSslKey() {
- return PGProperty.SSL_KEY.getOrDefault(properties);
- }
-
- /**
- * @param file SSL key
- * @see PGProperty#SSL_KEY
- */
- public void setSslKey(String file) {
- PGProperty.SSL_KEY.set(properties, file);
- }
-
- /**
- * @return SSL root certificate
- * @see PGProperty#SSL_ROOT_CERT
- */
- public String getSslRootCert() {
- return PGProperty.SSL_ROOT_CERT.getOrDefault(properties);
- }
-
- /**
- * @param file SSL root certificate
- * @see PGProperty#SSL_ROOT_CERT
- */
- public void setSslRootCert(String file) {
- PGProperty.SSL_ROOT_CERT.set(properties, file);
- }
-
- /**
- * @return SSL password
- * @see PGProperty#SSL_PASSWORD
- */
- public String getSslPassword() {
- return PGProperty.SSL_PASSWORD.getOrDefault(properties);
- }
-
- /**
- * @param password SSL password
- * @see PGProperty#SSL_PASSWORD
- */
- public void setSslPassword(String password) {
- PGProperty.SSL_PASSWORD.set(properties, password);
- }
-
- /**
- * @return SSL password callback
- * @see PGProperty#SSL_PASSWORD_CALLBACK
- */
- public String getSslPasswordCallback() {
- return PGProperty.SSL_PASSWORD_CALLBACK.getOrDefault(properties);
- }
-
- /**
- * @param className SSL password callback class name
- * @see PGProperty#SSL_PASSWORD_CALLBACK
- */
- public void setSslPasswordCallback(String className) {
- PGProperty.SSL_PASSWORD_CALLBACK.set(properties, className);
- }
-
- /**
- * @param applicationName application name
- * @see PGProperty#APPLICATION_NAME
- */
- public void setApplicationName(String applicationName) {
- PGProperty.APPLICATION_NAME.set(properties, applicationName);
- }
-
- /**
- * @return application name
- * @see PGProperty#APPLICATION_NAME
- */
- public String getApplicationName() {
- return PGProperty.APPLICATION_NAME.getOrDefault(properties);
- }
-
- /**
- * @param targetServerType target server type
- * @see PGProperty#TARGET_SERVER_TYPE
- */
- public void setTargetServerType(String targetServerType) {
- PGProperty.TARGET_SERVER_TYPE.set(properties, targetServerType);
- }
-
- /**
- * @return target server type
- * @see PGProperty#TARGET_SERVER_TYPE
- */
- public String getTargetServerType() {
- return PGProperty.TARGET_SERVER_TYPE.getOrDefault(properties);
- }
-
- /**
- * @param loadBalanceHosts load balance hosts
- * @see PGProperty#LOAD_BALANCE_HOSTS
- */
- public void setLoadBalanceHosts(boolean loadBalanceHosts) {
- PGProperty.LOAD_BALANCE_HOSTS.set(properties, loadBalanceHosts);
- }
-
- /**
- * @return load balance hosts
- * @see PGProperty#LOAD_BALANCE_HOSTS
- */
- public boolean getLoadBalanceHosts() {
- return PGProperty.LOAD_BALANCE_HOSTS.isPresent(properties);
- }
-
- /**
- * @param hostRecheckSeconds host recheck seconds
- * @see PGProperty#HOST_RECHECK_SECONDS
- */
- public void setHostRecheckSeconds(int hostRecheckSeconds) {
- PGProperty.HOST_RECHECK_SECONDS.set(properties, hostRecheckSeconds);
- }
-
- /**
- * @return host recheck seconds
- * @see PGProperty#HOST_RECHECK_SECONDS
- */
- public int getHostRecheckSeconds() {
- return PGProperty.HOST_RECHECK_SECONDS.getIntNoCheck(properties);
- }
-
- /**
- * @param enabled if TCP keep alive should be enabled
- * @see PGProperty#TCP_KEEP_ALIVE
- */
- public void setTcpKeepAlive(boolean enabled) {
- PGProperty.TCP_KEEP_ALIVE.set(properties, enabled);
- }
-
- /**
- * @return true if TCP keep alive is enabled
- * @see PGProperty#TCP_KEEP_ALIVE
- */
- public boolean getTcpKeepAlive() {
- return PGProperty.TCP_KEEP_ALIVE.getBoolean(properties);
- }
-
- /**
- * @param enabled if TCP no delay should be enabled
- * @see PGProperty#TCP_NO_DELAY
- */
- public void setTcpNoDelay(boolean enabled) {
- PGProperty.TCP_NO_DELAY.set(properties, enabled);
- }
-
- /**
- * @return true if TCP no delay is enabled
- * @see PGProperty#TCP_NO_DELAY
- */
- public boolean getTcpNoDelay() {
- return PGProperty.TCP_NO_DELAY.getBoolean(properties);
- }
-
- /**
- * @param enabled if binary transfer should be enabled
- * @see PGProperty#BINARY_TRANSFER
- */
- public void setBinaryTransfer(boolean enabled) {
- PGProperty.BINARY_TRANSFER.set(properties, enabled);
- }
-
- /**
- * @return true if binary transfer is enabled
- * @see PGProperty#BINARY_TRANSFER
- */
- public boolean getBinaryTransfer() {
- return PGProperty.BINARY_TRANSFER.getBoolean(properties);
- }
-
- /**
- * @param oidList list of OIDs that are allowed to use binary transfer
- * @see PGProperty#BINARY_TRANSFER_ENABLE
- */
- public void setBinaryTransferEnable(String oidList) {
- PGProperty.BINARY_TRANSFER_ENABLE.set(properties, oidList);
- }
-
- /**
- * @return list of OIDs that are allowed to use binary transfer
- * @see PGProperty#BINARY_TRANSFER_ENABLE
- */
- public String getBinaryTransferEnable() {
- return PGProperty.BINARY_TRANSFER_ENABLE.getOrDefault(properties);
- }
-
- /**
- * @param oidList list of OIDs that are not allowed to use binary transfer
- * @see PGProperty#BINARY_TRANSFER_DISABLE
- */
- public void setBinaryTransferDisable(String oidList) {
- PGProperty.BINARY_TRANSFER_DISABLE.set(properties, oidList);
- }
-
- /**
- * @return list of OIDs that are not allowed to use binary transfer
- * @see PGProperty#BINARY_TRANSFER_DISABLE
- */
- public String getBinaryTransferDisable() {
- return PGProperty.BINARY_TRANSFER_DISABLE.getOrDefault(properties);
- }
-
- /**
- * @return string type
- * @see PGProperty#STRING_TYPE
- */
- public String getStringType() {
- return PGProperty.STRING_TYPE.getOrDefault(properties);
- }
-
- /**
- * @param stringType string type
- * @see PGProperty#STRING_TYPE
- */
- public void setStringType(String stringType) {
- PGProperty.STRING_TYPE.set(properties, stringType);
- }
-
- /**
- * @return true if column sanitizer is disabled
- * @see PGProperty#DISABLE_COLUMN_SANITISER
- */
- public boolean isColumnSanitiserDisabled() {
- return PGProperty.DISABLE_COLUMN_SANITISER.getBoolean(properties);
- }
-
- /**
- * @return true if column sanitizer is disabled
- * @see PGProperty#DISABLE_COLUMN_SANITISER
- */
- public boolean getDisableColumnSanitiser() {
- return PGProperty.DISABLE_COLUMN_SANITISER.getBoolean(properties);
- }
-
- /**
- * @param disableColumnSanitiser if column sanitizer should be disabled
- * @see PGProperty#DISABLE_COLUMN_SANITISER
- */
- public void setDisableColumnSanitiser(boolean disableColumnSanitiser) {
- PGProperty.DISABLE_COLUMN_SANITISER.set(properties, disableColumnSanitiser);
- }
-
- /**
- * @return current schema
- * @see PGProperty#CURRENT_SCHEMA
- */
- public String getCurrentSchema() {
- return PGProperty.CURRENT_SCHEMA.getOrDefault(properties);
- }
-
- /**
- * @param currentSchema current schema
- * @see PGProperty#CURRENT_SCHEMA
- */
- public void setCurrentSchema(String currentSchema) {
- PGProperty.CURRENT_SCHEMA.set(properties, currentSchema);
- }
-
- /**
- * @return true if connection is readonly
- * @see PGProperty#READ_ONLY
- */
- public boolean getReadOnly() {
- return PGProperty.READ_ONLY.getBoolean(properties);
- }
-
- /**
- * @param readOnly if connection should be readonly
- * @see PGProperty#READ_ONLY
- */
- public void setReadOnly(boolean readOnly) {
- PGProperty.READ_ONLY.set(properties, readOnly);
- }
-
- /**
- * @return The behavior when set read only
- * @see PGProperty#READ_ONLY_MODE
- */
- public String getReadOnlyMode() {
- return PGProperty.READ_ONLY_MODE.getOrDefault(properties);
- }
-
- /**
- * @param mode the behavior when set read only
- * @see PGProperty#READ_ONLY_MODE
- */
- public void setReadOnlyMode(String mode) {
- PGProperty.READ_ONLY_MODE.set(properties, mode);
- }
-
- /**
- * @return true if driver should log unclosed connections
- * @see PGProperty#LOG_UNCLOSED_CONNECTIONS
- */
- public boolean getLogUnclosedConnections() {
- return PGProperty.LOG_UNCLOSED_CONNECTIONS.getBoolean(properties);
- }
-
- /**
- * @param enabled true if driver should log unclosed connections
- * @see PGProperty#LOG_UNCLOSED_CONNECTIONS
- */
- public void setLogUnclosedConnections(boolean enabled) {
- PGProperty.LOG_UNCLOSED_CONNECTIONS.set(properties, enabled);
- }
-
- /**
- * @return true if driver should log include detail in server error messages
- * @see PGProperty#LOG_SERVER_ERROR_DETAIL
- */
- public boolean getLogServerErrorDetail() {
- return PGProperty.LOG_SERVER_ERROR_DETAIL.getBoolean(properties);
- }
-
- /**
- * @param enabled true if driver should include detail in server error messages
- * @see PGProperty#LOG_SERVER_ERROR_DETAIL
- */
- public void setLogServerErrorDetail(boolean enabled) {
- PGProperty.LOG_SERVER_ERROR_DETAIL.set(properties, enabled);
- }
-
- /**
- * @return assumed minimal server version
- * @see PGProperty#ASSUME_MIN_SERVER_VERSION
- */
- public String getAssumeMinServerVersion() {
- return PGProperty.ASSUME_MIN_SERVER_VERSION.getOrDefault(properties);
- }
-
- /**
- * @param minVersion assumed minimal server version
- * @see PGProperty#ASSUME_MIN_SERVER_VERSION
- */
- public void setAssumeMinServerVersion(String minVersion) {
- PGProperty.ASSUME_MIN_SERVER_VERSION.set(properties, minVersion);
- }
-
- /**
- * This is important in pool-by-transaction scenarios in order to make sure that all the statements
- * reaches the same connection that is being initialized. If set then we will group the startup
- * parameters in a transaction
- * @return whether to group startup parameters or not
- * @see PGProperty#GROUP_STARTUP_PARAMETERS
- */
- public boolean getGroupStartupParameters() {
- return PGProperty.GROUP_STARTUP_PARAMETERS.getBoolean(properties);
- }
-
- /**
- *
- * @param groupStartupParameters whether to group startup Parameters in a transaction or not
- * @see PGProperty#GROUP_STARTUP_PARAMETERS
- */
- public void setGroupStartupParameters(boolean groupStartupParameters) {
- PGProperty.GROUP_STARTUP_PARAMETERS.set(properties, groupStartupParameters);
- }
-
- /**
- * @return JAAS application name
- * @see PGProperty#JAAS_APPLICATION_NAME
- */
- public String getJaasApplicationName() {
- return PGProperty.JAAS_APPLICATION_NAME.getOrDefault(properties);
- }
-
- /**
- * @param name JAAS application name
- * @see PGProperty#JAAS_APPLICATION_NAME
- */
- public void setJaasApplicationName(String name) {
- PGProperty.JAAS_APPLICATION_NAME.set(properties, name);
- }
-
- /**
- * @return true if perform JAAS login before GSS authentication
- * @see PGProperty#JAAS_LOGIN
- */
- public boolean getJaasLogin() {
- return PGProperty.JAAS_LOGIN.getBoolean(properties);
- }
-
- /**
- * @param doLogin true if perform JAAS login before GSS authentication
- * @see PGProperty#JAAS_LOGIN
- */
- public void setJaasLogin(boolean doLogin) {
- PGProperty.JAAS_LOGIN.set(properties, doLogin);
- }
-
- /**
- * @return Kerberos server name
- * @see PGProperty#KERBEROS_SERVER_NAME
- */
- public String getKerberosServerName() {
- return PGProperty.KERBEROS_SERVER_NAME.getOrDefault(properties);
- }
-
- /**
- * @param serverName Kerberos server name
- * @see PGProperty#KERBEROS_SERVER_NAME
- */
- public void setKerberosServerName(String serverName) {
- PGProperty.KERBEROS_SERVER_NAME.set(properties, serverName);
- }
-
- /**
- * @return true if use SPNEGO
- * @see PGProperty#USE_SPNEGO
- */
- public boolean getUseSpNego() {
- return PGProperty.USE_SPNEGO.getBoolean(properties);
- }
-
- /**
- * @param use true if use SPNEGO
- * @see PGProperty#USE_SPNEGO
- */
- public void setUseSpNego(boolean use) {
- PGProperty.USE_SPNEGO.set(properties, use);
- }
-
- /**
- * @return GSS mode: auto, sspi, or gssapi
- * @see PGProperty#GSS_LIB
- */
- public String getGssLib() {
- return PGProperty.GSS_LIB.getOrDefault(properties);
- }
-
- /**
- * @param lib GSS mode: auto, sspi, or gssapi
- * @see PGProperty#GSS_LIB
- */
- public void setGssLib(String lib) {
- PGProperty.GSS_LIB.set(properties, lib);
- }
-
- /**
- *
- * @return GSS encryption mode: disable, prefer or require
- */
- public String getGssEncMode() {
- return PGProperty.GSS_ENC_MODE.getOrDefault(properties);
- }
-
- /**
- *
- * @param mode encryption mode: disable, prefer or require
- */
- public void setGssEncMode(String mode) {
- PGProperty.GSS_ENC_MODE.set(properties, mode);
- }
-
- /**
- * @return SSPI service class
- * @see PGProperty#SSPI_SERVICE_CLASS
- */
- public String getSspiServiceClass() {
- return PGProperty.SSPI_SERVICE_CLASS.getOrDefault(properties);
- }
-
- /**
- * @param serviceClass SSPI service class
- * @see PGProperty#SSPI_SERVICE_CLASS
- */
- public void setSspiServiceClass(String serviceClass) {
- PGProperty.SSPI_SERVICE_CLASS.set(properties, serviceClass);
- }
-
- /**
- * @return if connection allows encoding changes
- * @see PGProperty#ALLOW_ENCODING_CHANGES
- */
- public boolean getAllowEncodingChanges() {
- return PGProperty.ALLOW_ENCODING_CHANGES.getBoolean(properties);
- }
-
- /**
- * @param allow if connection allows encoding changes
- * @see PGProperty#ALLOW_ENCODING_CHANGES
- */
- public void setAllowEncodingChanges(boolean allow) {
- PGProperty.ALLOW_ENCODING_CHANGES.set(properties, allow);
- }
-
- /**
- * @return socket factory class name
- * @see PGProperty#SOCKET_FACTORY
- */
- public String getSocketFactory() {
- return PGProperty.SOCKET_FACTORY.getOrDefault(properties);
- }
-
- /**
- * @param socketFactoryClassName socket factory class name
- * @see PGProperty#SOCKET_FACTORY
- */
- public void setSocketFactory(String socketFactoryClassName) {
- PGProperty.SOCKET_FACTORY.set(properties, socketFactoryClassName);
- }
-
- /**
- * @return socket factory argument
- * @see PGProperty#SOCKET_FACTORY_ARG
- */
- @SuppressWarnings("deprecation")
- public String getSocketFactoryArg() {
- return PGProperty.SOCKET_FACTORY_ARG.getOrDefault(properties);
- }
-
- /**
- * @param socketFactoryArg socket factory argument
- * @see PGProperty#SOCKET_FACTORY_ARG
- */
- @SuppressWarnings("deprecation")
- public void setSocketFactoryArg(String socketFactoryArg) {
- PGProperty.SOCKET_FACTORY_ARG.set(properties, socketFactoryArg);
- }
-
- /**
- * @param replication set to 'database' for logical replication or 'true' for physical replication
- * @see PGProperty#REPLICATION
- */
- public void setReplication(String replication) {
- PGProperty.REPLICATION.set(properties, replication);
- }
-
- /**
- * @return 'select', "callIfNoReturn', or 'call'
- * @see PGProperty#ESCAPE_SYNTAX_CALL_MODE
- */
- public String getEscapeSyntaxCallMode() {
- return PGProperty.ESCAPE_SYNTAX_CALL_MODE.getOrDefault(properties);
- }
-
- /**
- * @param callMode the call mode to use for JDBC escape call syntax
- * @see PGProperty#ESCAPE_SYNTAX_CALL_MODE
- */
- public void setEscapeSyntaxCallMode(String callMode) {
- PGProperty.ESCAPE_SYNTAX_CALL_MODE.set(properties, callMode);
- }
-
- /**
- * @return null, 'database', or 'true
- * @see PGProperty#REPLICATION
- */
- public String getReplication() {
- return PGProperty.REPLICATION.getOrDefault(properties);
- }
-
- /**
- * @return the localSocketAddress
- * @see PGProperty#LOCAL_SOCKET_ADDRESS
- */
- public String getLocalSocketAddress() {
- return PGProperty.LOCAL_SOCKET_ADDRESS.getOrDefault(properties);
- }
-
- /**
- * @param localSocketAddress local address to bind client side to
- * @see PGProperty#LOCAL_SOCKET_ADDRESS
- */
- public void setLocalSocketAddress(String localSocketAddress) {
- PGProperty.LOCAL_SOCKET_ADDRESS.set(properties, localSocketAddress);
- }
-
- /**
- * This property is no longer used by the driver and will be ignored.
- * @return loggerLevel in properties
- * @deprecated Configure via java.util.logging
- */
- @Deprecated
- public String getLoggerLevel() {
- return PGProperty.LOGGER_LEVEL.getOrDefault(properties);
- }
-
- /**
- * This property is no longer used by the driver and will be ignored.
- * @param loggerLevel loggerLevel to set, will be ignored
- * @deprecated Configure via java.util.logging
- */
- @Deprecated
- public void setLoggerLevel(String loggerLevel) {
- PGProperty.LOGGER_LEVEL.set(properties, loggerLevel);
- }
-
- /**
- * This property is no longer used by the driver and will be ignored.
- * @return loggerFile in properties
- * @deprecated Configure via java.util.logging
- */
- @Deprecated
- public String getLoggerFile() {
- ExpressionProperties exprProps = new ExpressionProperties(properties, System.getProperties());
- return PGProperty.LOGGER_FILE.getOrDefault(exprProps);
- }
-
- /**
- * This property is no longer used by the driver and will be ignored.
- * @param loggerFile will be ignored
- * @deprecated Configure via java.util.logging
- */
- @Deprecated
- public void setLoggerFile(String loggerFile) {
- PGProperty.LOGGER_FILE.set(properties, loggerFile);
- }
-
- /**
- * Generates a {@link DriverManager} URL from the other properties supplied.
- *
- * @return {@link DriverManager} URL from the other properties supplied
- */
- public String getUrl() {
- StringBuilder url = new StringBuilder(100);
- url.append("jdbc:postgresql://");
- for (int i = 0; i < serverNames.length; i++) {
- if (i > 0) {
- url.append(",");
- }
- url.append(serverNames[i]);
- if (portNumbers != null) {
- if (serverNames.length != portNumbers.length) {
- throw new IllegalArgumentException(
- String.format("Invalid argument: number of port %s entries must equal number of serverNames %s",
- Arrays.toString(portNumbers), Arrays.toString(serverNames)));
- }
- if (portNumbers.length >= i && portNumbers[i] != 0) {
- url.append(":").append(portNumbers[i]);
- }
-
- }
- }
- url.append("/");
- if (databaseName != null) {
- url.append(URLCoder.encode(databaseName));
- }
-
- StringBuilder query = new StringBuilder(100);
- for (PGProperty property : PGProperty.values()) {
- if (property.isPresent(properties)) {
- if (query.length() != 0) {
- query.append("&");
- }
- query.append(property.getName());
- query.append("=");
- String value = property.getOrDefault(properties);
- query.append(URLCoder.encode(value));
- }
- }
-
- if (query.length() > 0) {
- url.append("?");
- url.append(query);
- }
-
- return url.toString();
- }
-
- /**
- * Generates a {@link DriverManager} URL from the other properties supplied.
- *
- * @return {@link DriverManager} URL from the other properties supplied
- */
- public String getURL() {
- return getUrl();
- }
-
- /**
- * Sets properties from a {@link DriverManager} URL.
- *
- * @param url properties to set
- */
- public void setUrl(String url) {
-
- Properties p = Driver.parseURL(url, null);
-
- if (p == null) {
- throw new IllegalArgumentException("URL invalid " + url);
- }
- for (PGProperty property : PGProperty.values()) {
- if (!this.properties.containsKey(property.getName())) {
- setProperty(property, property.getOrDefault(p));
- }
- }
- }
-
- /**
- * Sets properties from a {@link DriverManager} URL.
- * Added to follow convention used in other DBMS.
- *
- * @param url properties to set
- */
- public void setURL(String url) {
- setUrl(url);
- }
-
- /**
- *
- * @return the class name to use for the Authentication Plugin.
- * This can be null in which case the default password authentication plugin will be used
- */
- public String getAuthenticationPluginClassName() {
- return PGProperty.AUTHENTICATION_PLUGIN_CLASS_NAME.getOrDefault(properties);
- }
-
- /**
- *
- * @param className name of a class which implements {@link org.postgresql.plugin.AuthenticationPlugin}
- * This class will be used to get the encoded bytes to be sent to the server as the
- * password to authenticate the user.
- *
- */
- public void setAuthenticationPluginClassName(String className) {
- PGProperty.AUTHENTICATION_PLUGIN_CLASS_NAME.set(properties, className);
- }
-
- public String getProperty(String name) throws SQLException {
- PGProperty pgProperty = PGProperty.forName(name);
- if (pgProperty != null) {
- return getProperty(pgProperty);
- } else {
- throw new PSQLException(GT.tr("Unsupported property name: {0}", name),
- PSQLState.INVALID_PARAMETER_VALUE);
- }
- }
-
- public void setProperty(String name, String value) throws SQLException {
- PGProperty pgProperty = PGProperty.forName(name);
- if (pgProperty != null) {
- setProperty(pgProperty, value);
- } else {
- throw new PSQLException(GT.tr("Unsupported property name: {0}", name),
- PSQLState.INVALID_PARAMETER_VALUE);
- }
- }
-
- public String getProperty(PGProperty property) {
- return property.getOrDefault(properties);
- }
-
- public void setProperty(PGProperty property, String value) {
- if (value == null) {
- // TODO: this is not consistent with PGProperty.PROPERTY.set(prop, null)
- // PGProperty removes an entry for put(null) call, however here we just ignore null
- return;
- }
- switch (property) {
- case PG_HOST:
- setServerNames(value.split(","));
- break;
- case PG_PORT:
- String[] ps = value.split(",");
- int[] ports = new int[ps.length];
- for (int i = 0; i < ps.length; i++) {
- try {
- ports[i] = Integer.parseInt(ps[i]);
- } catch (NumberFormatException e) {
- ports[i] = 0;
- }
- }
- setPortNumbers(ports);
- break;
- case PG_DBNAME:
- setDatabaseName(value);
- break;
- case USER:
- setUser(value);
- break;
- case PASSWORD:
- setPassword(value);
- break;
- default:
- properties.setProperty(property.getName(), value);
- }
- }
-
- /**
- * Generates a reference using the appropriate object factory.
- *
- * @return reference using the appropriate object factory
- */
- protected Reference createReference() {
- return new Reference(getClass().getName(), PGObjectFactory.class.getName(), null);
- }
-
- @Override
- public Reference getReference() throws NamingException {
- Reference ref = createReference();
- StringBuilder serverString = new StringBuilder();
- for (int i = 0; i < serverNames.length; i++) {
- if (i > 0) {
- serverString.append(",");
- }
- String serverName = serverNames[i];
- serverString.append(serverName);
- }
- ref.add(new StringRefAddr("serverName", serverString.toString()));
-
- StringBuilder portString = new StringBuilder();
- for (int i = 0; i < portNumbers.length; i++) {
- if (i > 0) {
- portString.append(",");
- }
- int p = portNumbers[i];
- portString.append(Integer.toString(p));
- }
- ref.add(new StringRefAddr("portNumber", portString.toString()));
- ref.add(new StringRefAddr("databaseName", databaseName));
- if (user != null) {
- ref.add(new StringRefAddr("user", user));
- }
- if (password != null) {
- ref.add(new StringRefAddr("password", password));
- }
-
- for (PGProperty property : PGProperty.values()) {
- if (property.isPresent(properties)) {
- String value = property.getOrDefault(properties);
- ref.add(new StringRefAddr(property.getName(), value));
- }
- }
-
- return ref;
- }
-
- public void setFromReference(Reference ref) {
- databaseName = getReferenceProperty(ref, "databaseName");
- String portNumberString = getReferenceProperty(ref, "portNumber");
- if (portNumberString != null) {
- String[] ps = portNumberString.split(",");
- int[] ports = new int[ps.length];
- for (int i = 0; i < ps.length; i++) {
+ private static final Logger LOGGER = Logger.getLogger(BaseDataSource.class.getName());
+
+ /*
+ * Ensure the driver is loaded as JDBC Driver might be invisible to Java's ServiceLoader.
+ * Usually, {@code Class.forName(...)} is not required as {@link DriverManager} detects JDBC drivers
+ * via {@code META-INF/services/java.sql.Driver} entries. However there might be cases when the driver
+ * is located at the application level classloader, thus it might be required to perform manual
+ * registration of the driver.
+ */
+ static {
try {
- ports[i] = Integer.parseInt(ps[i]);
- } catch (NumberFormatException e) {
- ports[i] = 0;
+ Class.forName("org.postgresql.Driver");
+ } catch (ClassNotFoundException e) {
+ throw new IllegalStateException(
+ "BaseDataSource is unable to load org.postgresql.Driver. Please check if you have proper PostgreSQL JDBC Driver jar on the classpath",
+ e);
}
- }
- setPortNumbers(ports);
- } else {
- setPortNumbers(null);
}
- String serverName = getReferenceProperty(ref, "serverName");
- setServerNames(serverName.split(","));
- for (PGProperty property : PGProperty.values()) {
- setProperty(property, getReferenceProperty(ref, property.getName()));
+ // Standard properties, defined in the JDBC 2.0 Optional Package spec
+ private String[] serverNames = new String[]{"localhost"};
+ private String databaseName = "";
+ private String user;
+ private String password;
+ private int[] portNumbers = new int[]{0};
+ // Map for all other properties
+ private Properties properties = new Properties();
+
+ private static String getReferenceProperty(Reference ref, String propertyName) {
+ RefAddr addr = ref.get(propertyName);
+ if (addr == null) {
+ return null;
+ }
+ return (String) addr.getContent();
}
- }
- private static String getReferenceProperty(Reference ref, String propertyName) {
- RefAddr addr = ref.get(propertyName);
- if (addr == null) {
- return null;
+ /**
+ * Gets a connection to the PostgreSQL database. The database is identified by the DataSource
+ * properties serverName, databaseName, and portNumber. The user to connect as is identified by
+ * the DataSource properties user and password.
+ *
+ * @return A valid database connection.
+ * @throws SQLException Occurs when the database connection cannot be established.
+ */
+ public Connection getConnection() throws SQLException {
+ return getConnection(user, password);
}
- return (String) addr.getContent();
- }
- protected void writeBaseObject(ObjectOutputStream out) throws IOException {
- out.writeObject(serverNames);
- out.writeObject(databaseName);
- out.writeObject(user);
- out.writeObject(password);
- out.writeObject(portNumbers);
+ /**
+ * Gets a connection to the PostgreSQL database. The database is identified by the DataSource
+ * properties serverName, databaseName, and portNumber. The user to connect as is identified by
+ * the arguments user and password, which override the DataSource properties by the same name.
+ *
+ * @param user user
+ * @param password password
+ * @return A valid database connection.
+ * @throws SQLException Occurs when the database connection cannot be established.
+ */
+ public Connection getConnection(String user, String password)
+ throws SQLException {
+ try {
+ Connection con = DriverManager.getConnection(getUrl(), user, password);
+ if (LOGGER.isLoggable(Level.FINE)) {
+ LOGGER.log(Level.FINE, "Created a {0} for {1} at {2}",
+ new Object[]{getDescription(), user, getUrl()});
+ }
+ return con;
+ } catch (SQLException e) {
+ LOGGER.log(Level.FINE, "Failed to create a {0} for {1} at {2}: {3}",
+ new Object[]{getDescription(), user, getUrl(), e});
+ throw e;
+ }
+ }
- out.writeObject(properties);
- }
+ /**
+ * This implementation does not use a LogWriter.
+ */
+ @Override
+ public PrintWriter getLogWriter() {
+ return null;
+ }
- protected void readBaseObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
- serverNames = (String[]) in.readObject();
- databaseName = (String) in.readObject();
- user = (String) in.readObject();
- password = (String) in.readObject();
- portNumbers = (int[]) in.readObject();
+ /**
+ * This implementation does not use a LogWriter.
+ *
+ * @param printWriter Not used
+ */
+ @Override
+ public void setLogWriter(PrintWriter printWriter) {
+ // NOOP
+ }
- properties = (Properties) in.readObject();
- }
+ /**
+ * Gets the name of the host the PostgreSQL database is running on.
+ *
+ * @return name of the host the PostgreSQL database is running on
+ * @deprecated use {@link #getServerNames()}
+ */
+ @Deprecated
+ public String getServerName() {
+ return serverNames[0];
+ }
- public void initializeFrom(BaseDataSource source) throws IOException, ClassNotFoundException {
- ByteArrayOutputStream baos = new ByteArrayOutputStream();
- ObjectOutputStream oos = new ObjectOutputStream(baos);
- source.writeBaseObject(oos);
- oos.close();
- ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
- ObjectInputStream ois = new ObjectInputStream(bais);
- readBaseObject(ois);
- }
+ /**
+ * Sets the name of the host the PostgreSQL database is running on. If this is changed, it will
+ * only affect future calls to getConnection. The default value is {@code localhost}.
+ *
+ * @param serverName name of the host the PostgreSQL database is running on
+ * @deprecated use {@link #setServerNames(String[])}
+ */
+ @Deprecated
+ public void setServerName(String serverName) {
+ this.setServerNames(new String[]{serverName});
+ }
- /**
- * @return preferred query execution mode
- * @see PGProperty#PREFER_QUERY_MODE
- */
- public PreferQueryMode getPreferQueryMode() {
- return PreferQueryMode.of(PGProperty.PREFER_QUERY_MODE.getOrDefault(properties));
- }
+ /**
+ * Gets the name of the host(s) the PostgreSQL database is running on.
+ *
+ * @return name of the host(s) the PostgreSQL database is running on
+ */
+ public String[] getServerNames() {
+ return serverNames;
+ }
- /**
- * @param preferQueryMode extended, simple, extendedForPrepared, or extendedCacheEverything
- * @see PGProperty#PREFER_QUERY_MODE
- */
- public void setPreferQueryMode(PreferQueryMode preferQueryMode) {
- PGProperty.PREFER_QUERY_MODE.set(properties, preferQueryMode.value());
- }
+ /**
+ * Sets the name of the host(s) the PostgreSQL database is running on. If this is changed, it will
+ * only affect future calls to getConnection. The default value is {@code localhost}.
+ *
+ * @param serverNames name of the host(s) the PostgreSQL database is running on
+ */
+ @SuppressWarnings("nullness")
+ public void setServerNames(String[] serverNames) {
+ if (serverNames == null || serverNames.length == 0) {
+ this.serverNames = new String[]{"localhost"};
+ } else {
+ serverNames = serverNames.clone();
+ for (int i = 0; i < serverNames.length; i++) {
+ String serverName = serverNames[i];
+ if (serverName == null || "".equals(serverName)) {
+ serverNames[i] = "localhost";
+ }
+ }
+ this.serverNames = serverNames;
+ }
+ }
- /**
- * @return connection configuration regarding automatic per-query savepoints
- * @see PGProperty#AUTOSAVE
- */
- public AutoSave getAutosave() {
- return AutoSave.of(PGProperty.AUTOSAVE.getOrDefault(properties));
- }
+ /**
+ * Gets the name of the PostgreSQL database, running on the server identified by the serverName
+ * property.
+ *
+ * @return name of the PostgreSQL database
+ */
+ public String getDatabaseName() {
+ return databaseName;
+ }
- /**
- * @param autoSave connection configuration regarding automatic per-query savepoints
- * @see PGProperty#AUTOSAVE
- */
- public void setAutosave(AutoSave autoSave) {
- PGProperty.AUTOSAVE.set(properties, autoSave.value());
- }
+ /**
+ * Sets the name of the PostgreSQL database, running on the server identified by the serverName
+ * property. If this is changed, it will only affect future calls to getConnection.
+ *
+ * @param databaseName name of the PostgreSQL database
+ */
+ public void setDatabaseName(String databaseName) {
+ this.databaseName = databaseName;
+ }
- /**
- * see PGProperty#CLEANUP_SAVEPOINTS
- *
- * @return boolean indicating property set
- */
- public boolean getCleanupSavepoints() {
- return PGProperty.CLEANUP_SAVEPOINTS.getBoolean(properties);
- }
+ /**
+ * Gets a description of this DataSource-ish thing. Must be customized by subclasses.
+ *
+ * @return description of this DataSource-ish thing
+ */
+ public abstract String getDescription();
- /**
- * see PGProperty#CLEANUP_SAVEPOINTS
- *
- * @param cleanupSavepoints will cleanup savepoints after a successful transaction
- */
- public void setCleanupSavepoints(boolean cleanupSavepoints) {
- PGProperty.CLEANUP_SAVEPOINTS.set(properties, cleanupSavepoints);
- }
+ /**
+ * Gets the user to connect as by default. If this is not specified, you must use the
+ * getConnection method which takes a user and password as parameters.
+ *
+ * @return user to connect as by default
+ */
+ public String getUser() {
+ return user;
+ }
- /**
- * @return boolean indicating property is enabled or not.
- * @see PGProperty#REWRITE_BATCHED_INSERTS
- */
- public boolean getReWriteBatchedInserts() {
- return PGProperty.REWRITE_BATCHED_INSERTS.getBoolean(properties);
- }
+ /**
+ * Sets the user to connect as by default. If this is not specified, you must use the
+ * getConnection method which takes a user and password as parameters. If this is changed, it will
+ * only affect future calls to getConnection.
+ *
+ * @param user user to connect as by default
+ */
+ public void setUser(String user) {
+ this.user = user;
+ }
- /**
- * @param reWrite boolean value to set the property in the properties collection
- * @see PGProperty#REWRITE_BATCHED_INSERTS
- */
- public void setReWriteBatchedInserts(boolean reWrite) {
- PGProperty.REWRITE_BATCHED_INSERTS.set(properties, reWrite);
- }
+ /**
+ * Gets the password to connect with by default. If this is not specified but a password is needed
+ * to log in, you must use the getConnection method which takes a user and password as parameters.
+ *
+ * @return password to connect with by default
+ */
+ public String getPassword() {
+ return password;
+ }
- /**
- * @return boolean indicating property is enabled or not.
- * @see PGProperty#HIDE_UNPRIVILEGED_OBJECTS
- */
- public boolean getHideUnprivilegedObjects() {
- return PGProperty.HIDE_UNPRIVILEGED_OBJECTS.getBoolean(properties);
- }
+ /**
+ * Sets the password to connect with by default. If this is not specified but a password is needed
+ * to log in, you must use the getConnection method which takes a user and password as parameters.
+ * If this is changed, it will only affect future calls to getConnection.
+ *
+ * @param password password to connect with by default
+ */
+ public void setPassword(String password) {
+ this.password = password;
+ }
- /**
- * @param hideUnprivileged boolean value to set the property in the properties collection
- * @see PGProperty#HIDE_UNPRIVILEGED_OBJECTS
- */
- public void setHideUnprivilegedObjects(boolean hideUnprivileged) {
- PGProperty.HIDE_UNPRIVILEGED_OBJECTS.set(properties, hideUnprivileged);
- }
+ /**
+ * Gets the port which the PostgreSQL server is listening on for TCP/IP connections.
+ *
+ * @return The port, or 0 if the default port will be used.
+ * @deprecated use {@link #getPortNumbers()}
+ */
+ @Deprecated
+ public int getPortNumber() {
+ if (portNumbers == null || portNumbers.length == 0) {
+ return 0;
+ }
+ return portNumbers[0];
+ }
- public String getMaxResultBuffer() {
- return PGProperty.MAX_RESULT_BUFFER.getOrDefault(properties);
- }
+ /**
+ * Sets the port which the PostgreSQL server is listening on for TCP/IP connections. Be sure the
+ * -i flag is passed to postmaster when PostgreSQL is started. If this is not set, or set to 0,
+ * the default port will be used.
+ *
+ * @param portNumber port which the PostgreSQL server is listening on for TCP/IP
+ * @deprecated use {@link #setPortNumbers(int[])}
+ */
+ @Deprecated
+ public void setPortNumber(int portNumber) {
+ setPortNumbers(new int[]{portNumber});
+ }
- public void setMaxResultBuffer(String maxResultBuffer) {
- PGProperty.MAX_RESULT_BUFFER.set(properties, maxResultBuffer);
- }
+ /**
+ * Gets the port(s) which the PostgreSQL server is listening on for TCP/IP connections.
+ *
+ * @return The port(s), or 0 if the default port will be used.
+ */
+ public int[] getPortNumbers() {
+ return portNumbers;
+ }
- public boolean getAdaptiveFetch() {
- return PGProperty.ADAPTIVE_FETCH.getBoolean(properties);
- }
+ /**
+ * Sets the port(s) which the PostgreSQL server is listening on for TCP/IP connections. Be sure the
+ * -i flag is passed to postmaster when PostgreSQL is started. If this is not set, or set to 0,
+ * the default port will be used.
+ *
+ * @param portNumbers port(s) which the PostgreSQL server is listening on for TCP/IP
+ */
+ public void setPortNumbers(int[] portNumbers) {
+ if (portNumbers == null || portNumbers.length == 0) {
+ portNumbers = new int[]{0};
+ }
+ this.portNumbers = Arrays.copyOf(portNumbers, portNumbers.length);
+ }
- public void setAdaptiveFetch(boolean adaptiveFetch) {
- PGProperty.ADAPTIVE_FETCH.set(properties, adaptiveFetch);
- }
+ /**
+ * @return command line options for this connection
+ */
+ public String getOptions() {
+ return PGProperty.OPTIONS.getOrDefault(properties);
+ }
- public int getAdaptiveFetchMaximum() {
- return PGProperty.ADAPTIVE_FETCH_MAXIMUM.getIntNoCheck(properties);
- }
+ /**
+ * Set command line options for this connection
+ *
+ * @param options string to set options to
+ */
+ public void setOptions(String options) {
+ PGProperty.OPTIONS.set(properties, options);
+ }
- public void setAdaptiveFetchMaximum(int adaptiveFetchMaximum) {
- PGProperty.ADAPTIVE_FETCH_MAXIMUM.set(properties, adaptiveFetchMaximum);
- }
+ /**
+ * @return login timeout
+ * @see PGProperty#LOGIN_TIMEOUT
+ */
+ @Override
+ public int getLoginTimeout() {
+ return PGProperty.LOGIN_TIMEOUT.getIntNoCheck(properties);
+ }
- public int getAdaptiveFetchMinimum() {
- return PGProperty.ADAPTIVE_FETCH_MINIMUM.getIntNoCheck(properties);
- }
+ /**
+ * @param loginTimeout login timeout
+ * @see PGProperty#LOGIN_TIMEOUT
+ */
+ @Override
+ public void setLoginTimeout(int loginTimeout) {
+ PGProperty.LOGIN_TIMEOUT.set(properties, loginTimeout);
+ }
- public void setAdaptiveFetchMinimum(int adaptiveFetchMinimum) {
- PGProperty.ADAPTIVE_FETCH_MINIMUM.set(properties, adaptiveFetchMinimum);
- }
+ /**
+ * @return connect timeout
+ * @see PGProperty#CONNECT_TIMEOUT
+ */
+ public int getConnectTimeout() {
+ return PGProperty.CONNECT_TIMEOUT.getIntNoCheck(properties);
+ }
- @Override
- public Logger getParentLogger() {
- return Logger.getLogger("org.postgresql");
- }
+ /**
+ * @param connectTimeout connect timeout
+ * @see PGProperty#CONNECT_TIMEOUT
+ */
+ public void setConnectTimeout(int connectTimeout) {
+ PGProperty.CONNECT_TIMEOUT.set(properties, connectTimeout);
+ }
- public String getXmlFactoryFactory() {
- return PGProperty.XML_FACTORY_FACTORY.getOrDefault(properties);
- }
+ /**
+ * @return GSS ResponseTimeout
+ * @see PGProperty#GSS_RESPONSE_TIMEOUT
+ */
+ public int getGssResponseTimeout() {
+ return PGProperty.GSS_RESPONSE_TIMEOUT.getIntNoCheck(properties);
+ }
- public void setXmlFactoryFactory(String xmlFactoryFactory) {
- PGProperty.XML_FACTORY_FACTORY.set(properties, xmlFactoryFactory);
- }
+ /**
+ * @param gssResponseTimeout gss response timeout
+ * @see PGProperty#GSS_RESPONSE_TIMEOUT
+ */
+ public void setGssResponseTimeout(int gssResponseTimeout) {
+ PGProperty.GSS_RESPONSE_TIMEOUT.set(properties, gssResponseTimeout);
+ }
- /*
- * Alias methods below, these are to help with ease-of-use with other database tools / frameworks
- * which expect normal java bean getters / setters to exist for the property names.
- */
+ /**
+ * @return SSL ResponseTimeout
+ * @see PGProperty#SSL_RESPONSE_TIMEOUT
+ */
+ public int getSslResponseTimeout() {
+ return PGProperty.SSL_RESPONSE_TIMEOUT.getIntNoCheck(properties);
+ }
- public boolean isSsl() {
- return getSsl();
- }
+ /**
+ * @param sslResponseTimeout ssl response timeout
+ * @see PGProperty#SSL_RESPONSE_TIMEOUT
+ */
+ public void setSslResponseTimeout(int sslResponseTimeout) {
+ PGProperty.SSL_RESPONSE_TIMEOUT.set(properties, sslResponseTimeout);
+ }
- public String getSslfactoryarg() {
- return getSslFactoryArg();
- }
+ /**
+ * @return protocol version
+ * @see PGProperty#PROTOCOL_VERSION
+ */
+ public int getProtocolVersion() {
+ if (!PGProperty.PROTOCOL_VERSION.isPresent(properties)) {
+ return 0;
+ } else {
+ return PGProperty.PROTOCOL_VERSION.getIntNoCheck(properties);
+ }
+ }
- public void setSslfactoryarg(final String arg) {
- setSslFactoryArg(arg);
- }
+ /**
+ * @param protocolVersion protocol version
+ * @see PGProperty#PROTOCOL_VERSION
+ */
+ public void setProtocolVersion(int protocolVersion) {
+ if (protocolVersion == 0) {
+ PGProperty.PROTOCOL_VERSION.set(properties, null);
+ } else {
+ PGProperty.PROTOCOL_VERSION.set(properties, protocolVersion);
+ }
+ }
- public String getSslcert() {
- return getSslCert();
- }
+ /**
+ * @return quoteReturningIdentifiers
+ * @see PGProperty#QUOTE_RETURNING_IDENTIFIERS
+ */
+ public boolean getQuoteReturningIdentifiers() {
+ return PGProperty.QUOTE_RETURNING_IDENTIFIERS.getBoolean(properties);
+ }
- public void setSslcert(final String file) {
- setSslCert(file);
- }
+ /**
+ * @param quoteIdentifiers indicate whether to quote identifiers
+ * @see PGProperty#QUOTE_RETURNING_IDENTIFIERS
+ */
+ public void setQuoteReturningIdentifiers(boolean quoteIdentifiers) {
+ PGProperty.QUOTE_RETURNING_IDENTIFIERS.set(properties, quoteIdentifiers);
+ }
- public String getSslmode() {
- return getSslMode();
- }
+ /**
+ * @return receive buffer size
+ * @see PGProperty#RECEIVE_BUFFER_SIZE
+ */
+ public int getReceiveBufferSize() {
+ return PGProperty.RECEIVE_BUFFER_SIZE.getIntNoCheck(properties);
+ }
- public void setSslmode(final String mode) {
- setSslMode(mode);
- }
+ /**
+ * @param nbytes receive buffer size
+ * @see PGProperty#RECEIVE_BUFFER_SIZE
+ */
+ public void setReceiveBufferSize(int nbytes) {
+ PGProperty.RECEIVE_BUFFER_SIZE.set(properties, nbytes);
+ }
- public String getSslhostnameverifier() {
- return getSslHostnameVerifier();
- }
+ /**
+ * @return send buffer size
+ * @see PGProperty#SEND_BUFFER_SIZE
+ */
+ public int getSendBufferSize() {
+ return PGProperty.SEND_BUFFER_SIZE.getIntNoCheck(properties);
+ }
- public void setSslhostnameverifier(final String className) {
- setSslHostnameVerifier(className);
- }
+ /**
+ * @param nbytes send buffer size
+ * @see PGProperty#SEND_BUFFER_SIZE
+ */
+ public void setSendBufferSize(int nbytes) {
+ PGProperty.SEND_BUFFER_SIZE.set(properties, nbytes);
+ }
- public String getSslkey() {
- return getSslKey();
- }
+ /**
+ * @return prepare threshold
+ * @see PGProperty#PREPARE_THRESHOLD
+ */
+ public int getPrepareThreshold() {
+ return PGProperty.PREPARE_THRESHOLD.getIntNoCheck(properties);
+ }
- public void setSslkey(final String file) {
- setSslKey(file);
- }
+ /**
+ * @param count prepare threshold
+ * @see PGProperty#PREPARE_THRESHOLD
+ */
+ public void setPrepareThreshold(int count) {
+ PGProperty.PREPARE_THRESHOLD.set(properties, count);
+ }
- public String getSslrootcert() {
- return getSslRootCert();
- }
+ /**
+ * @return prepared statement cache size (number of statements per connection)
+ * @see PGProperty#PREPARED_STATEMENT_CACHE_QUERIES
+ */
+ public int getPreparedStatementCacheQueries() {
+ return PGProperty.PREPARED_STATEMENT_CACHE_QUERIES.getIntNoCheck(properties);
+ }
- public void setSslrootcert(final String file) {
- setSslRootCert(file);
- }
+ /**
+ * @param cacheSize prepared statement cache size (number of statements per connection)
+ * @see PGProperty#PREPARED_STATEMENT_CACHE_QUERIES
+ */
+ public void setPreparedStatementCacheQueries(int cacheSize) {
+ PGProperty.PREPARED_STATEMENT_CACHE_QUERIES.set(properties, cacheSize);
+ }
- public String getSslpasswordcallback() {
- return getSslPasswordCallback();
- }
+ /**
+ * @return prepared statement cache size (number of megabytes per connection)
+ * @see PGProperty#PREPARED_STATEMENT_CACHE_SIZE_MIB
+ */
+ public int getPreparedStatementCacheSizeMiB() {
+ return PGProperty.PREPARED_STATEMENT_CACHE_SIZE_MIB.getIntNoCheck(properties);
+ }
- public void setSslpasswordcallback(final String className) {
- setSslPasswordCallback(className);
- }
+ /**
+ * @param cacheSize statement cache size (number of megabytes per connection)
+ * @see PGProperty#PREPARED_STATEMENT_CACHE_SIZE_MIB
+ */
+ public void setPreparedStatementCacheSizeMiB(int cacheSize) {
+ PGProperty.PREPARED_STATEMENT_CACHE_SIZE_MIB.set(properties, cacheSize);
+ }
- public String getSslpassword() {
- return getSslPassword();
- }
+ /**
+ * @return database metadata cache fields size (number of fields cached per connection)
+ * @see PGProperty#DATABASE_METADATA_CACHE_FIELDS
+ */
+ public int getDatabaseMetadataCacheFields() {
+ return PGProperty.DATABASE_METADATA_CACHE_FIELDS.getIntNoCheck(properties);
+ }
- public void setSslpassword(final String sslpassword) {
- setSslPassword(sslpassword);
- }
+ /**
+ * @param cacheSize database metadata cache fields size (number of fields cached per connection)
+ * @see PGProperty#DATABASE_METADATA_CACHE_FIELDS
+ */
+ public void setDatabaseMetadataCacheFields(int cacheSize) {
+ PGProperty.DATABASE_METADATA_CACHE_FIELDS.set(properties, cacheSize);
+ }
- public int getRecvBufferSize() {
- return getReceiveBufferSize();
- }
+ /**
+ * @return database metadata cache fields size (number of megabytes per connection)
+ * @see PGProperty#DATABASE_METADATA_CACHE_FIELDS_MIB
+ */
+ public int getDatabaseMetadataCacheFieldsMiB() {
+ return PGProperty.DATABASE_METADATA_CACHE_FIELDS_MIB.getIntNoCheck(properties);
+ }
- public void setRecvBufferSize(final int nbytes) {
- setReceiveBufferSize(nbytes);
- }
+ /**
+ * @param cacheSize database metadata cache fields size (number of megabytes per connection)
+ * @see PGProperty#DATABASE_METADATA_CACHE_FIELDS_MIB
+ */
+ public void setDatabaseMetadataCacheFieldsMiB(int cacheSize) {
+ PGProperty.DATABASE_METADATA_CACHE_FIELDS_MIB.set(properties, cacheSize);
+ }
- public boolean isAllowEncodingChanges() {
- return getAllowEncodingChanges();
- }
+ /**
+ * @return default fetch size
+ * @see PGProperty#DEFAULT_ROW_FETCH_SIZE
+ */
+ public int getDefaultRowFetchSize() {
+ return PGProperty.DEFAULT_ROW_FETCH_SIZE.getIntNoCheck(properties);
+ }
- public boolean isLogUnclosedConnections() {
- return getLogUnclosedConnections();
- }
+ /**
+ * @param fetchSize default fetch size
+ * @see PGProperty#DEFAULT_ROW_FETCH_SIZE
+ */
+ public void setDefaultRowFetchSize(int fetchSize) {
+ PGProperty.DEFAULT_ROW_FETCH_SIZE.set(properties, fetchSize);
+ }
- public boolean isTcpKeepAlive() {
- return getTcpKeepAlive();
- }
+ /**
+ * @return unknown length
+ * @see PGProperty#UNKNOWN_LENGTH
+ */
+ public int getUnknownLength() {
+ return PGProperty.UNKNOWN_LENGTH.getIntNoCheck(properties);
+ }
- public boolean isReadOnly() {
- return getReadOnly();
- }
+ /**
+ * @param unknownLength unknown length
+ * @see PGProperty#UNKNOWN_LENGTH
+ */
+ public void setUnknownLength(int unknownLength) {
+ PGProperty.UNKNOWN_LENGTH.set(properties, unknownLength);
+ }
- public boolean isDisableColumnSanitiser() {
- return getDisableColumnSanitiser();
- }
+ /**
+ * @return socket timeout
+ * @see PGProperty#SOCKET_TIMEOUT
+ */
+ public int getSocketTimeout() {
+ return PGProperty.SOCKET_TIMEOUT.getIntNoCheck(properties);
+ }
- public boolean isLoadBalanceHosts() {
- return getLoadBalanceHosts();
- }
+ /**
+ * @param seconds socket timeout
+ * @see PGProperty#SOCKET_TIMEOUT
+ */
+ public void setSocketTimeout(int seconds) {
+ PGProperty.SOCKET_TIMEOUT.set(properties, seconds);
+ }
- public boolean isCleanupSavePoints() {
- return getCleanupSavepoints();
- }
+ /**
+ * @return timeout that is used for sending cancel command in seconds
+ * @see PGProperty#CANCEL_SIGNAL_TIMEOUT
+ */
+ public int getCancelSignalTimeout() {
+ return PGProperty.CANCEL_SIGNAL_TIMEOUT.getIntNoCheck(properties);
+ }
- public void setCleanupSavePoints(final boolean cleanupSavepoints) {
- setCleanupSavepoints(cleanupSavepoints);
- }
+ /**
+ * @param seconds timeout that is used for sending cancel command
+ * @see PGProperty#CANCEL_SIGNAL_TIMEOUT
+ */
+ public void setCancelSignalTimeout(int seconds) {
+ PGProperty.CANCEL_SIGNAL_TIMEOUT.set(properties, seconds);
+ }
- public boolean isReWriteBatchedInserts() {
- return getReWriteBatchedInserts();
- }
+ /**
+ * @return true if SSL is enabled
+ * @see PGProperty#SSL
+ */
+ public boolean getSsl() {
+ // "true" if "ssl" is set but empty
+ return PGProperty.SSL.getBoolean(properties) || "".equals(PGProperty.SSL.getOrDefault(properties));
+ }
+
+ /**
+ * @return SSL factory class name
+ * @see PGProperty#SSL_FACTORY
+ */
+ public String getSslfactory() {
+ return PGProperty.SSL_FACTORY.getOrDefault(properties);
+ }
+
+ /**
+ * @param classname SSL factory class name
+ * @see PGProperty#SSL_FACTORY
+ */
+ public void setSslfactory(String classname) {
+ PGProperty.SSL_FACTORY.set(properties, classname);
+ }
+
+ /**
+ * @return SSL mode
+ * @see PGProperty#SSL_MODE
+ */
+ public String getSslMode() {
+ return PGProperty.SSL_MODE.getOrDefault(properties);
+ }
+
+ /**
+ * @param mode SSL mode
+ * @see PGProperty#SSL_MODE
+ */
+ public void setSslMode(String mode) {
+ PGProperty.SSL_MODE.set(properties, mode);
+ }
+
+ /**
+ * @return argument forwarded to the SSL factory
+ * @see PGProperty#SSL_FACTORY_ARG
+ */
+ @SuppressWarnings("deprecation")
+ public String getSslFactoryArg() {
+ return PGProperty.SSL_FACTORY_ARG.getOrDefault(properties);
+ }
+
+ /**
+ * @param arg argument forwarded to SSL factory
+ * @see PGProperty#SSL_FACTORY_ARG
+ */
+ @SuppressWarnings("deprecation")
+ public void setSslFactoryArg(String arg) {
+ PGProperty.SSL_FACTORY_ARG.set(properties, arg);
+ }
+
+ /**
+ * @return SSL hostname verifier class name
+ * @see PGProperty#SSL_HOSTNAME_VERIFIER
+ */
+ public String getSslHostnameVerifier() {
+ return PGProperty.SSL_HOSTNAME_VERIFIER.getOrDefault(properties);
+ }
+
+ /**
+ * @param className SSL hostname verifier
+ * @see PGProperty#SSL_HOSTNAME_VERIFIER
+ */
+ public void setSslHostnameVerifier(String className) {
+ PGProperty.SSL_HOSTNAME_VERIFIER.set(properties, className);
+ }
+
+ /**
+ * @return SSL certificate file
+ * @see PGProperty#SSL_CERT
+ */
+ public String getSslCert() {
+ return PGProperty.SSL_CERT.getOrDefault(properties);
+ }
+
+ /**
+ * @param file SSL certificate
+ * @see PGProperty#SSL_CERT
+ */
+ public void setSslCert(String file) {
+ PGProperty.SSL_CERT.set(properties, file);
+ }
+
+ /**
+ * @return SSL key file
+ * @see PGProperty#SSL_KEY
+ */
+ public String getSslKey() {
+ return PGProperty.SSL_KEY.getOrDefault(properties);
+ }
+
+ /**
+ * @param file SSL key
+ * @see PGProperty#SSL_KEY
+ */
+ public void setSslKey(String file) {
+ PGProperty.SSL_KEY.set(properties, file);
+ }
+
+ /**
+ * @return SSL root certificate
+ * @see PGProperty#SSL_ROOT_CERT
+ */
+ public String getSslRootCert() {
+ return PGProperty.SSL_ROOT_CERT.getOrDefault(properties);
+ }
+
+ /**
+ * @param file SSL root certificate
+ * @see PGProperty#SSL_ROOT_CERT
+ */
+ public void setSslRootCert(String file) {
+ PGProperty.SSL_ROOT_CERT.set(properties, file);
+ }
+
+ /**
+ * @return SSL password
+ * @see PGProperty#SSL_PASSWORD
+ */
+ public String getSslPassword() {
+ return PGProperty.SSL_PASSWORD.getOrDefault(properties);
+ }
+
+ /**
+ * @param password SSL password
+ * @see PGProperty#SSL_PASSWORD
+ */
+ public void setSslPassword(String password) {
+ PGProperty.SSL_PASSWORD.set(properties, password);
+ }
+
+ /**
+ * @return SSL password callback
+ * @see PGProperty#SSL_PASSWORD_CALLBACK
+ */
+ public String getSslPasswordCallback() {
+ return PGProperty.SSL_PASSWORD_CALLBACK.getOrDefault(properties);
+ }
+
+ /**
+ * @param className SSL password callback class name
+ * @see PGProperty#SSL_PASSWORD_CALLBACK
+ */
+ public void setSslPasswordCallback(String className) {
+ PGProperty.SSL_PASSWORD_CALLBACK.set(properties, className);
+ }
+
+ /**
+ * @return application name
+ * @see PGProperty#APPLICATION_NAME
+ */
+ public String getApplicationName() {
+ return PGProperty.APPLICATION_NAME.getOrDefault(properties);
+ }
+
+ /**
+ * @param applicationName application name
+ * @see PGProperty#APPLICATION_NAME
+ */
+ public void setApplicationName(String applicationName) {
+ PGProperty.APPLICATION_NAME.set(properties, applicationName);
+ }
+
+ /**
+ * @return target server type
+ * @see PGProperty#TARGET_SERVER_TYPE
+ */
+ public String getTargetServerType() {
+ return PGProperty.TARGET_SERVER_TYPE.getOrDefault(properties);
+ }
+
+ /**
+ * @param targetServerType target server type
+ * @see PGProperty#TARGET_SERVER_TYPE
+ */
+ public void setTargetServerType(String targetServerType) {
+ PGProperty.TARGET_SERVER_TYPE.set(properties, targetServerType);
+ }
+
+ /**
+ * @return load balance hosts
+ * @see PGProperty#LOAD_BALANCE_HOSTS
+ */
+ public boolean getLoadBalanceHosts() {
+ return PGProperty.LOAD_BALANCE_HOSTS.isPresent(properties);
+ }
+
+ /**
+ * @return host recheck seconds
+ * @see PGProperty#HOST_RECHECK_SECONDS
+ */
+ public int getHostRecheckSeconds() {
+ return PGProperty.HOST_RECHECK_SECONDS.getIntNoCheck(properties);
+ }
+
+ /**
+ * @param hostRecheckSeconds host recheck seconds
+ * @see PGProperty#HOST_RECHECK_SECONDS
+ */
+ public void setHostRecheckSeconds(int hostRecheckSeconds) {
+ PGProperty.HOST_RECHECK_SECONDS.set(properties, hostRecheckSeconds);
+ }
+
+ /**
+ * @return true if TCP keep alive is enabled
+ * @see PGProperty#TCP_KEEP_ALIVE
+ */
+ public boolean getTcpKeepAlive() {
+ return PGProperty.TCP_KEEP_ALIVE.getBoolean(properties);
+ }
+
+ /**
+ * @return true if TCP no delay is enabled
+ * @see PGProperty#TCP_NO_DELAY
+ */
+ public boolean getTcpNoDelay() {
+ return PGProperty.TCP_NO_DELAY.getBoolean(properties);
+ }
+
+ /**
+ * @param enabled if TCP no delay should be enabled
+ * @see PGProperty#TCP_NO_DELAY
+ */
+ public void setTcpNoDelay(boolean enabled) {
+ PGProperty.TCP_NO_DELAY.set(properties, enabled);
+ }
+
+ /**
+ * @return true if binary transfer is enabled
+ * @see PGProperty#BINARY_TRANSFER
+ */
+ public boolean getBinaryTransfer() {
+ return PGProperty.BINARY_TRANSFER.getBoolean(properties);
+ }
+
+ /**
+ * @param enabled if binary transfer should be enabled
+ * @see PGProperty#BINARY_TRANSFER
+ */
+ public void setBinaryTransfer(boolean enabled) {
+ PGProperty.BINARY_TRANSFER.set(properties, enabled);
+ }
+
+ /**
+ * @return list of OIDs that are allowed to use binary transfer
+ * @see PGProperty#BINARY_TRANSFER_ENABLE
+ */
+ public String getBinaryTransferEnable() {
+ return PGProperty.BINARY_TRANSFER_ENABLE.getOrDefault(properties);
+ }
+
+ /**
+ * @param oidList list of OIDs that are allowed to use binary transfer
+ * @see PGProperty#BINARY_TRANSFER_ENABLE
+ */
+ public void setBinaryTransferEnable(String oidList) {
+ PGProperty.BINARY_TRANSFER_ENABLE.set(properties, oidList);
+ }
+
+ /**
+ * @return list of OIDs that are not allowed to use binary transfer
+ * @see PGProperty#BINARY_TRANSFER_DISABLE
+ */
+ public String getBinaryTransferDisable() {
+ return PGProperty.BINARY_TRANSFER_DISABLE.getOrDefault(properties);
+ }
+
+ /**
+ * @param oidList list of OIDs that are not allowed to use binary transfer
+ * @see PGProperty#BINARY_TRANSFER_DISABLE
+ */
+ public void setBinaryTransferDisable(String oidList) {
+ PGProperty.BINARY_TRANSFER_DISABLE.set(properties, oidList);
+ }
+
+ /**
+ * @return string type
+ * @see PGProperty#STRING_TYPE
+ */
+ public String getStringType() {
+ return PGProperty.STRING_TYPE.getOrDefault(properties);
+ }
+
+ /**
+ * @param stringType string type
+ * @see PGProperty#STRING_TYPE
+ */
+ public void setStringType(String stringType) {
+ PGProperty.STRING_TYPE.set(properties, stringType);
+ }
+
+ /**
+ * @return true if column sanitizer is disabled
+ * @see PGProperty#DISABLE_COLUMN_SANITISER
+ */
+ public boolean isColumnSanitiserDisabled() {
+ return PGProperty.DISABLE_COLUMN_SANITISER.getBoolean(properties);
+ }
+
+ /**
+ * @return true if column sanitizer is disabled
+ * @see PGProperty#DISABLE_COLUMN_SANITISER
+ */
+ public boolean getDisableColumnSanitiser() {
+ return PGProperty.DISABLE_COLUMN_SANITISER.getBoolean(properties);
+ }
+
+ /**
+ * @return current schema
+ * @see PGProperty#CURRENT_SCHEMA
+ */
+ public String getCurrentSchema() {
+ return PGProperty.CURRENT_SCHEMA.getOrDefault(properties);
+ }
+
+ /**
+ * @param currentSchema current schema
+ * @see PGProperty#CURRENT_SCHEMA
+ */
+ public void setCurrentSchema(String currentSchema) {
+ PGProperty.CURRENT_SCHEMA.set(properties, currentSchema);
+ }
+
+ /**
+ * @return true if connection is readonly
+ * @see PGProperty#READ_ONLY
+ */
+ public boolean getReadOnly() {
+ return PGProperty.READ_ONLY.getBoolean(properties);
+ }
+
+ /**
+ * @return The behavior when set read only
+ * @see PGProperty#READ_ONLY_MODE
+ */
+ public String getReadOnlyMode() {
+ return PGProperty.READ_ONLY_MODE.getOrDefault(properties);
+ }
+
+ /**
+ * @param mode the behavior when set read only
+ * @see PGProperty#READ_ONLY_MODE
+ */
+ public void setReadOnlyMode(String mode) {
+ PGProperty.READ_ONLY_MODE.set(properties, mode);
+ }
+
+ /**
+ * @return true if driver should log unclosed connections
+ * @see PGProperty#LOG_UNCLOSED_CONNECTIONS
+ */
+ public boolean getLogUnclosedConnections() {
+ return PGProperty.LOG_UNCLOSED_CONNECTIONS.getBoolean(properties);
+ }
+
+ /**
+ * @return true if driver should log include detail in server error messages
+ * @see PGProperty#LOG_SERVER_ERROR_DETAIL
+ */
+ public boolean getLogServerErrorDetail() {
+ return PGProperty.LOG_SERVER_ERROR_DETAIL.getBoolean(properties);
+ }
+
+ /**
+ * @param enabled true if driver should include detail in server error messages
+ * @see PGProperty#LOG_SERVER_ERROR_DETAIL
+ */
+ public void setLogServerErrorDetail(boolean enabled) {
+ PGProperty.LOG_SERVER_ERROR_DETAIL.set(properties, enabled);
+ }
+
+ /**
+ * @return assumed minimal server version
+ * @see PGProperty#ASSUME_MIN_SERVER_VERSION
+ */
+ public String getAssumeMinServerVersion() {
+ return PGProperty.ASSUME_MIN_SERVER_VERSION.getOrDefault(properties);
+ }
+
+ /**
+ * @param minVersion assumed minimal server version
+ * @see PGProperty#ASSUME_MIN_SERVER_VERSION
+ */
+ public void setAssumeMinServerVersion(String minVersion) {
+ PGProperty.ASSUME_MIN_SERVER_VERSION.set(properties, minVersion);
+ }
+
+ /**
+ * This is important in pool-by-transaction scenarios in order to make sure that all the statements
+ * reaches the same connection that is being initialized. If set then we will group the startup
+ * parameters in a transaction
+ *
+ * @return whether to group startup parameters or not
+ * @see PGProperty#GROUP_STARTUP_PARAMETERS
+ */
+ public boolean getGroupStartupParameters() {
+ return PGProperty.GROUP_STARTUP_PARAMETERS.getBoolean(properties);
+ }
+
+ /**
+ * @param groupStartupParameters whether to group startup Parameters in a transaction or not
+ * @see PGProperty#GROUP_STARTUP_PARAMETERS
+ */
+ public void setGroupStartupParameters(boolean groupStartupParameters) {
+ PGProperty.GROUP_STARTUP_PARAMETERS.set(properties, groupStartupParameters);
+ }
+
+ /**
+ * @return JAAS application name
+ * @see PGProperty#JAAS_APPLICATION_NAME
+ */
+ public String getJaasApplicationName() {
+ return PGProperty.JAAS_APPLICATION_NAME.getOrDefault(properties);
+ }
+
+ /**
+ * @param name JAAS application name
+ * @see PGProperty#JAAS_APPLICATION_NAME
+ */
+ public void setJaasApplicationName(String name) {
+ PGProperty.JAAS_APPLICATION_NAME.set(properties, name);
+ }
+
+ /**
+ * @return true if perform JAAS login before GSS authentication
+ * @see PGProperty#JAAS_LOGIN
+ */
+ public boolean getJaasLogin() {
+ return PGProperty.JAAS_LOGIN.getBoolean(properties);
+ }
+
+ /**
+ * @param doLogin true if perform JAAS login before GSS authentication
+ * @see PGProperty#JAAS_LOGIN
+ */
+ public void setJaasLogin(boolean doLogin) {
+ PGProperty.JAAS_LOGIN.set(properties, doLogin);
+ }
+
+ /**
+ * @return Kerberos server name
+ * @see PGProperty#KERBEROS_SERVER_NAME
+ */
+ public String getKerberosServerName() {
+ return PGProperty.KERBEROS_SERVER_NAME.getOrDefault(properties);
+ }
+
+ /**
+ * @param serverName Kerberos server name
+ * @see PGProperty#KERBEROS_SERVER_NAME
+ */
+ public void setKerberosServerName(String serverName) {
+ PGProperty.KERBEROS_SERVER_NAME.set(properties, serverName);
+ }
+
+ /**
+ * @return true if use SPNEGO
+ * @see PGProperty#USE_SPNEGO
+ */
+ public boolean getUseSpNego() {
+ return PGProperty.USE_SPNEGO.getBoolean(properties);
+ }
+
+ /**
+ * @param use true if use SPNEGO
+ * @see PGProperty#USE_SPNEGO
+ */
+ public void setUseSpNego(boolean use) {
+ PGProperty.USE_SPNEGO.set(properties, use);
+ }
+
+ /**
+ * @return GSS mode: auto, sspi, or gssapi
+ * @see PGProperty#GSS_LIB
+ */
+ public String getGssLib() {
+ return PGProperty.GSS_LIB.getOrDefault(properties);
+ }
+
+ /**
+ * @param lib GSS mode: auto, sspi, or gssapi
+ * @see PGProperty#GSS_LIB
+ */
+ public void setGssLib(String lib) {
+ PGProperty.GSS_LIB.set(properties, lib);
+ }
+
+ /**
+ * @return GSS encryption mode: disable, prefer or require
+ */
+ public String getGssEncMode() {
+ return PGProperty.GSS_ENC_MODE.getOrDefault(properties);
+ }
+
+ /**
+ * @param mode encryption mode: disable, prefer or require
+ */
+ public void setGssEncMode(String mode) {
+ PGProperty.GSS_ENC_MODE.set(properties, mode);
+ }
+
+ /**
+ * @return SSPI service class
+ * @see PGProperty#SSPI_SERVICE_CLASS
+ */
+ public String getSspiServiceClass() {
+ return PGProperty.SSPI_SERVICE_CLASS.getOrDefault(properties);
+ }
+
+ /**
+ * @param serviceClass SSPI service class
+ * @see PGProperty#SSPI_SERVICE_CLASS
+ */
+ public void setSspiServiceClass(String serviceClass) {
+ PGProperty.SSPI_SERVICE_CLASS.set(properties, serviceClass);
+ }
+
+ /**
+ * @return if connection allows encoding changes
+ * @see PGProperty#ALLOW_ENCODING_CHANGES
+ */
+ public boolean getAllowEncodingChanges() {
+ return PGProperty.ALLOW_ENCODING_CHANGES.getBoolean(properties);
+ }
+
+ /**
+ * @return socket factory class name
+ * @see PGProperty#SOCKET_FACTORY
+ */
+ public String getSocketFactory() {
+ return PGProperty.SOCKET_FACTORY.getOrDefault(properties);
+ }
+
+ /**
+ * @param socketFactoryClassName socket factory class name
+ * @see PGProperty#SOCKET_FACTORY
+ */
+ public void setSocketFactory(String socketFactoryClassName) {
+ PGProperty.SOCKET_FACTORY.set(properties, socketFactoryClassName);
+ }
+
+ /**
+ * @return socket factory argument
+ * @see PGProperty#SOCKET_FACTORY_ARG
+ */
+ @SuppressWarnings("deprecation")
+ public String getSocketFactoryArg() {
+ return PGProperty.SOCKET_FACTORY_ARG.getOrDefault(properties);
+ }
+
+ /**
+ * @param socketFactoryArg socket factory argument
+ * @see PGProperty#SOCKET_FACTORY_ARG
+ */
+ @SuppressWarnings("deprecation")
+ public void setSocketFactoryArg(String socketFactoryArg) {
+ PGProperty.SOCKET_FACTORY_ARG.set(properties, socketFactoryArg);
+ }
+
+ /**
+ * @return 'select', 'callIfNoReturn', or 'call'
+ * @see PGProperty#ESCAPE_SYNTAX_CALL_MODE
+ */
+ public String getEscapeSyntaxCallMode() {
+ return PGProperty.ESCAPE_SYNTAX_CALL_MODE.getOrDefault(properties);
+ }
+
+ /**
+ * @param callMode the call mode to use for JDBC escape call syntax
+ * @see PGProperty#ESCAPE_SYNTAX_CALL_MODE
+ */
+ public void setEscapeSyntaxCallMode(String callMode) {
+ PGProperty.ESCAPE_SYNTAX_CALL_MODE.set(properties, callMode);
+ }
+
+ /**
+ * @return null, 'database', or 'true'
+ * @see PGProperty#REPLICATION
+ */
+ public String getReplication() {
+ return PGProperty.REPLICATION.getOrDefault(properties);
+ }
+
+ /**
+ * @param replication set to 'database' for logical replication or 'true' for physical replication
+ * @see PGProperty#REPLICATION
+ */
+ public void setReplication(String replication) {
+ PGProperty.REPLICATION.set(properties, replication);
+ }
+
+ /**
+ * @return the localSocketAddress
+ * @see PGProperty#LOCAL_SOCKET_ADDRESS
+ */
+ public String getLocalSocketAddress() {
+ return PGProperty.LOCAL_SOCKET_ADDRESS.getOrDefault(properties);
+ }
+
+ /**
+ * @param localSocketAddress local address to bind client side to
+ * @see PGProperty#LOCAL_SOCKET_ADDRESS
+ */
+ public void setLocalSocketAddress(String localSocketAddress) {
+ PGProperty.LOCAL_SOCKET_ADDRESS.set(properties, localSocketAddress);
+ }
+
+ /**
+ * This property is no longer used by the driver and will be ignored.
+ *
+ * @return loggerLevel in properties
+ * @deprecated Configure via java.util.logging
+ */
+ @Deprecated
+ public String getLoggerLevel() {
+ return PGProperty.LOGGER_LEVEL.getOrDefault(properties);
+ }
+
+ /**
+ * This property is no longer used by the driver and will be ignored.
+ *
+ * @param loggerLevel loggerLevel to set, will be ignored
+ * @deprecated Configure via java.util.logging
+ */
+ @Deprecated
+ public void setLoggerLevel(String loggerLevel) {
+ PGProperty.LOGGER_LEVEL.set(properties, loggerLevel);
+ }
+
+ /**
+ * This property is no longer used by the driver and will be ignored.
+ *
+ * @return loggerFile in properties
+ * @deprecated Configure via java.util.logging
+ */
+ @Deprecated
+ public String getLoggerFile() {
+ ExpressionProperties exprProps = new ExpressionProperties(properties, System.getProperties());
+ return PGProperty.LOGGER_FILE.getOrDefault(exprProps);
+ }
+
+ /**
+ * This property is no longer used by the driver and will be ignored.
+ *
+ * @param loggerFile will be ignored
+ * @deprecated Configure via java.util.logging
+ */
+ @Deprecated
+ public void setLoggerFile(String loggerFile) {
+ PGProperty.LOGGER_FILE.set(properties, loggerFile);
+ }
+
+ /**
+ * Generates a {@link DriverManager} URL from the other properties supplied.
+ *
+ * @return {@link DriverManager} URL from the other properties supplied
+ */
+ public String getUrl() {
+ StringBuilder url = new StringBuilder(100);
+ url.append("jdbc:postgresql://");
+ for (int i = 0; i < serverNames.length; i++) {
+ if (i > 0) {
+ url.append(",");
+ }
+ url.append(serverNames[i]);
+ if (portNumbers != null) {
+ if (serverNames.length != portNumbers.length) {
+ throw new IllegalArgumentException(
+ String.format("Invalid argument: number of port %s entries must equal number of serverNames %s",
+ Arrays.toString(portNumbers), Arrays.toString(serverNames)));
+ }
+ if (portNumbers.length >= i && portNumbers[i] != 0) {
+ url.append(":").append(portNumbers[i]);
+ }
+
+ }
+ }
+ url.append("/");
+ if (databaseName != null) {
+ url.append(URLCoder.encode(databaseName));
+ }
+
+ StringBuilder query = new StringBuilder(100);
+ for (PGProperty property : PGProperty.values()) {
+ if (property.isPresent(properties)) {
+ if (query.length() != 0) {
+ query.append("&");
+ }
+ query.append(property.getName());
+ query.append("=");
+ String value = property.getOrDefault(properties);
+ query.append(URLCoder.encode(value));
+ }
+ }
+
+ if (query.length() > 0) {
+ url.append("?");
+ url.append(query);
+ }
+
+ return url.toString();
+ }
+
+ /**
+ * Sets properties from a {@link DriverManager} URL.
+ *
+ * @param url properties to set
+ */
+ public void setUrl(String url) {
+
+ Properties p = Driver.parseURL(url, null);
+
+ if (p == null) {
+ throw new IllegalArgumentException("URL invalid " + url);
+ }
+ for (PGProperty property : PGProperty.values()) {
+ if (!this.properties.containsKey(property.getName())) {
+ setProperty(property, property.getOrDefault(p));
+ }
+ }
+ }
+
+ /**
+ * Generates a {@link DriverManager} URL from the other properties supplied.
+ *
+ * @return {@link DriverManager} URL from the other properties supplied
+ */
+ public String getURL() {
+ return getUrl();
+ }
+
+ /**
+ * Sets properties from a {@link DriverManager} URL.
+ * Added to follow convention used in other DBMS.
+ *
+ * @param url properties to set
+ */
+ public void setURL(String url) {
+ setUrl(url);
+ }
+
+ /**
+ * @return the class name to use for the Authentication Plugin.
+ * This can be null in which case the default password authentication plugin will be used
+ */
+ public String getAuthenticationPluginClassName() {
+ return PGProperty.AUTHENTICATION_PLUGIN_CLASS_NAME.getOrDefault(properties);
+ }
+
+ /**
+ * @param className name of a class which implements {@link org.postgresql.plugin.AuthenticationPlugin}
+ * This class will be used to get the encoded bytes to be sent to the server as the
+ * password to authenticate the user.
+ */
+ public void setAuthenticationPluginClassName(String className) {
+ PGProperty.AUTHENTICATION_PLUGIN_CLASS_NAME.set(properties, className);
+ }
+
+ public String getProperty(String name) throws SQLException {
+ PGProperty pgProperty = PGProperty.forName(name);
+ if (pgProperty != null) {
+ return getProperty(pgProperty);
+ } else {
+ throw new PSQLException(GT.tr("Unsupported property name: {0}", name),
+ PSQLState.INVALID_PARAMETER_VALUE);
+ }
+ }
+
+ public void setProperty(String name, String value) throws SQLException {
+ PGProperty pgProperty = PGProperty.forName(name);
+ if (pgProperty != null) {
+ setProperty(pgProperty, value);
+ } else {
+ throw new PSQLException(GT.tr("Unsupported property name: {0}", name),
+ PSQLState.INVALID_PARAMETER_VALUE);
+ }
+ }
+
+ public String getProperty(PGProperty property) {
+ return property.getOrDefault(properties);
+ }
+
+ public void setProperty(PGProperty property, String value) {
+ if (value == null) {
+ // TODO: this is not consistent with PGProperty.PROPERTY.set(prop, null)
+ // PGProperty removes an entry for put(null) call, however here we just ignore null
+ return;
+ }
+ switch (property) {
+ case PG_HOST:
+ setServerNames(value.split(","));
+ break;
+ case PG_PORT:
+ String[] ps = value.split(",");
+ int[] ports = new int[ps.length];
+ for (int i = 0; i < ps.length; i++) {
+ try {
+ ports[i] = Integer.parseInt(ps[i]);
+ } catch (NumberFormatException e) {
+ ports[i] = 0;
+ }
+ }
+ setPortNumbers(ports);
+ break;
+ case PG_DBNAME:
+ setDatabaseName(value);
+ break;
+ case USER:
+ setUser(value);
+ break;
+ case PASSWORD:
+ setPassword(value);
+ break;
+ default:
+ properties.setProperty(property.getName(), value);
+ }
+ }
+
+ /**
+ * Generates a reference using the appropriate object factory.
+ *
+ * @return reference using the appropriate object factory
+ */
+ protected Reference createReference() {
+ return new Reference(getClass().getName(), PGObjectFactory.class.getName(), null);
+ }
+
+ @Override
+ public Reference getReference() throws NamingException {
+ Reference ref = createReference();
+ StringBuilder serverString = new StringBuilder();
+ for (int i = 0; i < serverNames.length; i++) {
+ if (i > 0) {
+ serverString.append(",");
+ }
+ String serverName = serverNames[i];
+ serverString.append(serverName);
+ }
+ ref.add(new StringRefAddr("serverName", serverString.toString()));
+
+ StringBuilder portString = new StringBuilder();
+ for (int i = 0; i < portNumbers.length; i++) {
+ if (i > 0) {
+ portString.append(",");
+ }
+ int p = portNumbers[i];
+ portString.append(Integer.toString(p));
+ }
+ ref.add(new StringRefAddr("portNumber", portString.toString()));
+ ref.add(new StringRefAddr("databaseName", databaseName));
+ if (user != null) {
+ ref.add(new StringRefAddr("user", user));
+ }
+ if (password != null) {
+ ref.add(new StringRefAddr("password", password));
+ }
+
+ for (PGProperty property : PGProperty.values()) {
+ if (property.isPresent(properties)) {
+ String value = property.getOrDefault(properties);
+ ref.add(new StringRefAddr(property.getName(), value));
+ }
+ }
+
+ return ref;
+ }
+
+ public void setFromReference(Reference ref) {
+ databaseName = getReferenceProperty(ref, "databaseName");
+ String portNumberString = getReferenceProperty(ref, "portNumber");
+ if (portNumberString != null) {
+ String[] ps = portNumberString.split(",");
+ int[] ports = new int[ps.length];
+ for (int i = 0; i < ps.length; i++) {
+ try {
+ ports[i] = Integer.parseInt(ps[i]);
+ } catch (NumberFormatException e) {
+ ports[i] = 0;
+ }
+ }
+ setPortNumbers(ports);
+ } else {
+ setPortNumbers(null);
+ }
+ String serverName = getReferenceProperty(ref, "serverName");
+ setServerNames(serverName.split(","));
+
+ for (PGProperty property : PGProperty.values()) {
+ setProperty(property, getReferenceProperty(ref, property.getName()));
+ }
+ }
+
+ protected void writeBaseObject(ObjectOutputStream out) throws IOException {
+ out.writeObject(serverNames);
+ out.writeObject(databaseName);
+ out.writeObject(user);
+ out.writeObject(password);
+ out.writeObject(portNumbers);
+
+ out.writeObject(properties);
+ }
+
+ protected void readBaseObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
+ serverNames = (String[]) in.readObject();
+ databaseName = (String) in.readObject();
+ user = (String) in.readObject();
+ password = (String) in.readObject();
+ portNumbers = (int[]) in.readObject();
+
+ properties = (Properties) in.readObject();
+ }
+
+ public void initializeFrom(BaseDataSource source) throws IOException, ClassNotFoundException {
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ ObjectOutputStream oos = new ObjectOutputStream(baos);
+ source.writeBaseObject(oos);
+ oos.close();
+ ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
+ ObjectInputStream ois = new ObjectInputStream(bais);
+ readBaseObject(ois);
+ }
+
+ /**
+ * @return preferred query execution mode
+ * @see PGProperty#PREFER_QUERY_MODE
+ */
+ public PreferQueryMode getPreferQueryMode() {
+ return PreferQueryMode.of(PGProperty.PREFER_QUERY_MODE.getOrDefault(properties));
+ }
+
+ /**
+ * @param preferQueryMode extended, simple, extendedForPrepared, or extendedCacheEverything
+ * @see PGProperty#PREFER_QUERY_MODE
+ */
+ public void setPreferQueryMode(PreferQueryMode preferQueryMode) {
+ PGProperty.PREFER_QUERY_MODE.set(properties, preferQueryMode.value());
+ }
+
+ /**
+ * @return connection configuration regarding automatic per-query savepoints
+ * @see PGProperty#AUTOSAVE
+ */
+ public AutoSave getAutosave() {
+ return AutoSave.of(PGProperty.AUTOSAVE.getOrDefault(properties));
+ }
+
+ /**
+ * @param autoSave connection configuration regarding automatic per-query savepoints
+ * @see PGProperty#AUTOSAVE
+ */
+ public void setAutosave(AutoSave autoSave) {
+ PGProperty.AUTOSAVE.set(properties, autoSave.value());
+ }
+
+ /**
+ * see PGProperty#CLEANUP_SAVEPOINTS
+ *
+ * @return boolean indicating property set
+ */
+ public boolean getCleanupSavepoints() {
+ return PGProperty.CLEANUP_SAVEPOINTS.getBoolean(properties);
+ }
+
+ /**
+ * see PGProperty#CLEANUP_SAVEPOINTS
+ *
+ * @param cleanupSavepoints will cleanup savepoints after a successful transaction
+ */
+ public void setCleanupSavepoints(boolean cleanupSavepoints) {
+ PGProperty.CLEANUP_SAVEPOINTS.set(properties, cleanupSavepoints);
+ }
+
+ /**
+ * @return boolean indicating property is enabled or not.
+ * @see PGProperty#REWRITE_BATCHED_INSERTS
+ */
+ public boolean getReWriteBatchedInserts() {
+ return PGProperty.REWRITE_BATCHED_INSERTS.getBoolean(properties);
+ }
+
+ /**
+ * @return boolean indicating property is enabled or not.
+ * @see PGProperty#HIDE_UNPRIVILEGED_OBJECTS
+ */
+ public boolean getHideUnprivilegedObjects() {
+ return PGProperty.HIDE_UNPRIVILEGED_OBJECTS.getBoolean(properties);
+ }
+
+ /**
+ * @param hideUnprivileged boolean value to set the property in the properties collection
+ * @see PGProperty#HIDE_UNPRIVILEGED_OBJECTS
+ */
+ public void setHideUnprivilegedObjects(boolean hideUnprivileged) {
+ PGProperty.HIDE_UNPRIVILEGED_OBJECTS.set(properties, hideUnprivileged);
+ }
+
+ public String getMaxResultBuffer() {
+ return PGProperty.MAX_RESULT_BUFFER.getOrDefault(properties);
+ }
+
+ public void setMaxResultBuffer(String maxResultBuffer) {
+ PGProperty.MAX_RESULT_BUFFER.set(properties, maxResultBuffer);
+ }
+
+ public boolean getAdaptiveFetch() {
+ return PGProperty.ADAPTIVE_FETCH.getBoolean(properties);
+ }
+
+ public void setAdaptiveFetch(boolean adaptiveFetch) {
+ PGProperty.ADAPTIVE_FETCH.set(properties, adaptiveFetch);
+ }
+
+ public int getAdaptiveFetchMaximum() {
+ return PGProperty.ADAPTIVE_FETCH_MAXIMUM.getIntNoCheck(properties);
+ }
+
+ public void setAdaptiveFetchMaximum(int adaptiveFetchMaximum) {
+ PGProperty.ADAPTIVE_FETCH_MAXIMUM.set(properties, adaptiveFetchMaximum);
+ }
+
+ public int getAdaptiveFetchMinimum() {
+ return PGProperty.ADAPTIVE_FETCH_MINIMUM.getIntNoCheck(properties);
+ }
+
+ public void setAdaptiveFetchMinimum(int adaptiveFetchMinimum) {
+ PGProperty.ADAPTIVE_FETCH_MINIMUM.set(properties, adaptiveFetchMinimum);
+ }
+
+ @Override
+ public Logger getParentLogger() {
+ return Logger.getLogger("org.postgresql");
+ }
+
+ public String getXmlFactoryFactory() {
+ return PGProperty.XML_FACTORY_FACTORY.getOrDefault(properties);
+ }
+
+ public void setXmlFactoryFactory(String xmlFactoryFactory) {
+ PGProperty.XML_FACTORY_FACTORY.set(properties, xmlFactoryFactory);
+ }
+
+ public boolean isSsl() {
+ return getSsl();
+ }
+
+ /**
+ * @param enabled if SSL is enabled
+ * @see PGProperty#SSL
+ */
+ public void setSsl(boolean enabled) {
+ if (enabled) {
+ PGProperty.SSL.set(properties, true);
+ } else {
+ PGProperty.SSL.set(properties, false);
+ }
+ }
+
+ public String getSslfactoryarg() {
+ return getSslFactoryArg();
+ }
+
+ public void setSslfactoryarg(final String arg) {
+ setSslFactoryArg(arg);
+ }
+
+ public String getSslcert() {
+ return getSslCert();
+ }
+
+ public void setSslcert(final String file) {
+ setSslCert(file);
+ }
+
+ public String getSslmode() {
+ return getSslMode();
+ }
+
+ public void setSslmode(final String mode) {
+ setSslMode(mode);
+ }
+
+ /*
+ * Alias methods below, these are to help with ease-of-use with other database tools / frameworks
+ * which expect normal java bean getters / setters to exist for the property names.
+ */
+
+ public String getSslhostnameverifier() {
+ return getSslHostnameVerifier();
+ }
+
+ public void setSslhostnameverifier(final String className) {
+ setSslHostnameVerifier(className);
+ }
+
+ public String getSslkey() {
+ return getSslKey();
+ }
+
+ public void setSslkey(final String file) {
+ setSslKey(file);
+ }
+
+ public String getSslrootcert() {
+ return getSslRootCert();
+ }
+
+ public void setSslrootcert(final String file) {
+ setSslRootCert(file);
+ }
+
+ public String getSslpasswordcallback() {
+ return getSslPasswordCallback();
+ }
+
+ public void setSslpasswordcallback(final String className) {
+ setSslPasswordCallback(className);
+ }
+
+ public String getSslpassword() {
+ return getSslPassword();
+ }
+
+ public void setSslpassword(final String sslpassword) {
+ setSslPassword(sslpassword);
+ }
+
+ public int getRecvBufferSize() {
+ return getReceiveBufferSize();
+ }
+
+ public void setRecvBufferSize(final int nbytes) {
+ setReceiveBufferSize(nbytes);
+ }
+
+ public boolean isAllowEncodingChanges() {
+ return getAllowEncodingChanges();
+ }
+
+ /**
+ * @param allow if connection allows encoding changes
+ * @see PGProperty#ALLOW_ENCODING_CHANGES
+ */
+ public void setAllowEncodingChanges(boolean allow) {
+ PGProperty.ALLOW_ENCODING_CHANGES.set(properties, allow);
+ }
+
+ public boolean isLogUnclosedConnections() {
+ return getLogUnclosedConnections();
+ }
+
+ /**
+ * @param enabled true if driver should log unclosed connections
+ * @see PGProperty#LOG_UNCLOSED_CONNECTIONS
+ */
+ public void setLogUnclosedConnections(boolean enabled) {
+ PGProperty.LOG_UNCLOSED_CONNECTIONS.set(properties, enabled);
+ }
+
+ public boolean isTcpKeepAlive() {
+ return getTcpKeepAlive();
+ }
+
+ /**
+ * @param enabled if TCP keep alive should be enabled
+ * @see PGProperty#TCP_KEEP_ALIVE
+ */
+ public void setTcpKeepAlive(boolean enabled) {
+ PGProperty.TCP_KEEP_ALIVE.set(properties, enabled);
+ }
+
+ public boolean isReadOnly() {
+ return getReadOnly();
+ }
+
+ /**
+ * @param readOnly if connection should be readonly
+ * @see PGProperty#READ_ONLY
+ */
+ public void setReadOnly(boolean readOnly) {
+ PGProperty.READ_ONLY.set(properties, readOnly);
+ }
+
+ public boolean isDisableColumnSanitiser() {
+ return getDisableColumnSanitiser();
+ }
+
+ /**
+ * @param disableColumnSanitiser if column sanitizer should be disabled
+ * @see PGProperty#DISABLE_COLUMN_SANITISER
+ */
+ public void setDisableColumnSanitiser(boolean disableColumnSanitiser) {
+ PGProperty.DISABLE_COLUMN_SANITISER.set(properties, disableColumnSanitiser);
+ }
+
+ public boolean isLoadBalanceHosts() {
+ return getLoadBalanceHosts();
+ }
+
+ /**
+ * @param loadBalanceHosts load balance hosts
+ * @see PGProperty#LOAD_BALANCE_HOSTS
+ */
+ public void setLoadBalanceHosts(boolean loadBalanceHosts) {
+ PGProperty.LOAD_BALANCE_HOSTS.set(properties, loadBalanceHosts);
+ }
+
+ public boolean isCleanupSavePoints() {
+ return getCleanupSavepoints();
+ }
+
+ public void setCleanupSavePoints(final boolean cleanupSavepoints) {
+ setCleanupSavepoints(cleanupSavepoints);
+ }
+
+ public boolean isReWriteBatchedInserts() {
+ return getReWriteBatchedInserts();
+ }
+
+ /**
+ * @param reWrite boolean value to set the property in the properties collection
+ * @see PGProperty#REWRITE_BATCHED_INSERTS
+ */
+ public void setReWriteBatchedInserts(boolean reWrite) {
+ PGProperty.REWRITE_BATCHED_INSERTS.set(properties, reWrite);
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/ds/common/PGObjectFactory.java b/pgjdbc/src/main/java/org/postgresql/ds/common/PGObjectFactory.java
index d02613a..8002a55 100644
--- a/pgjdbc/src/main/java/org/postgresql/ds/common/PGObjectFactory.java
+++ b/pgjdbc/src/main/java/org/postgresql/ds/common/PGObjectFactory.java
@@ -5,17 +5,15 @@
package org.postgresql.ds.common;
-import org.postgresql.ds.PGConnectionPoolDataSource;
-import org.postgresql.ds.PGPoolingDataSource;
-import org.postgresql.ds.PGSimpleDataSource;
-
import java.util.Hashtable;
-
import javax.naming.Context;
import javax.naming.Name;
import javax.naming.RefAddr;
import javax.naming.Reference;
import javax.naming.spi.ObjectFactory;
+import org.postgresql.ds.PGConnectionPoolDataSource;
+import org.postgresql.ds.PGPoolingDataSource;
+import org.postgresql.ds.PGSimpleDataSource;
/**
* Returns a DataSource-ish thing based on a JNDI reference. In the case of a SimpleDataSource or
@@ -27,77 +25,77 @@ import javax.naming.spi.ObjectFactory;
* @author Aaron Mulder (ammulder@chariotsolutions.com)
*/
public class PGObjectFactory implements ObjectFactory {
- /**
- * Dereferences a PostgreSQL DataSource. Other types of references are ignored.
- */
- @Override
- public Object getObjectInstance(Object obj, Name name, Context nameCtx,
- Hashtable<?, ?> environment) throws Exception {
- Reference ref = (Reference) obj;
- String className = ref.getClassName();
- // Old names are here for those who still use them
- if ("org.postgresql.ds.PGSimpleDataSource".equals(className)
- || "org.postgresql.jdbc2.optional.SimpleDataSource".equals(className)
- || "org.postgresql.jdbc3.Jdbc3SimpleDataSource".equals(className)) {
- return loadSimpleDataSource(ref);
- } else if ("org.postgresql.ds.PGConnectionPoolDataSource".equals(className)
- || "org.postgresql.jdbc2.optional.ConnectionPool".equals(className)
- || "org.postgresql.jdbc3.Jdbc3ConnectionPool".equals(className)) {
- return loadConnectionPool(ref);
- } else if ("org.postgresql.ds.PGPoolingDataSource".equals(className)
- || "org.postgresql.jdbc2.optional.PoolingDataSource".equals(className)
- || "org.postgresql.jdbc3.Jdbc3PoolingDataSource".equals(className)) {
- return loadPoolingDataSource(ref);
- } else {
- return null;
+ /**
+ * Dereferences a PostgreSQL DataSource. Other types of references are ignored.
+ */
+ @Override
+ public Object getObjectInstance(Object obj, Name name, Context nameCtx,
+ Hashtable<?, ?> environment) throws Exception {
+ Reference ref = (Reference) obj;
+ String className = ref.getClassName();
+ // Old names are here for those who still use them
+ if ("org.postgresql.ds.PGSimpleDataSource".equals(className)
+ || "org.postgresql.jdbc2.optional.SimpleDataSource".equals(className)
+ || "org.postgresql.jdbc3.Jdbc3SimpleDataSource".equals(className)) {
+ return loadSimpleDataSource(ref);
+ } else if ("org.postgresql.ds.PGConnectionPoolDataSource".equals(className)
+ || "org.postgresql.jdbc2.optional.ConnectionPool".equals(className)
+ || "org.postgresql.jdbc3.Jdbc3ConnectionPool".equals(className)) {
+ return loadConnectionPool(ref);
+ } else if ("org.postgresql.ds.PGPoolingDataSource".equals(className)
+ || "org.postgresql.jdbc2.optional.PoolingDataSource".equals(className)
+ || "org.postgresql.jdbc3.Jdbc3PoolingDataSource".equals(className)) {
+ return loadPoolingDataSource(ref);
+ } else {
+ return null;
+ }
}
- }
- @SuppressWarnings("deprecation")
- private Object loadPoolingDataSource(Reference ref) {
- // If DataSource exists, return it
- String name = getProperty(ref, "dataSourceName");
- PGPoolingDataSource pds = PGPoolingDataSource.getDataSource(name);
- if (pds != null) {
- return pds;
+ @SuppressWarnings("deprecation")
+ private Object loadPoolingDataSource(Reference ref) {
+ // If DataSource exists, return it
+ String name = getProperty(ref, "dataSourceName");
+ PGPoolingDataSource pds = PGPoolingDataSource.getDataSource(name);
+ if (pds != null) {
+ return pds;
+ }
+ // Otherwise, create a new one
+ pds = new PGPoolingDataSource();
+ pds.setDataSourceName(name);
+ loadBaseDataSource(pds, ref);
+ String min = getProperty(ref, "initialConnections");
+ if (min != null) {
+ pds.setInitialConnections(Integer.parseInt(min));
+ }
+ String max = getProperty(ref, "maxConnections");
+ if (max != null) {
+ pds.setMaxConnections(Integer.parseInt(max));
+ }
+ return pds;
}
- // Otherwise, create a new one
- pds = new PGPoolingDataSource();
- pds.setDataSourceName(name);
- loadBaseDataSource(pds, ref);
- String min = getProperty(ref, "initialConnections");
- if (min != null) {
- pds.setInitialConnections(Integer.parseInt(min));
+
+ private Object loadSimpleDataSource(Reference ref) {
+ PGSimpleDataSource ds = new PGSimpleDataSource();
+ return loadBaseDataSource(ds, ref);
}
- String max = getProperty(ref, "maxConnections");
- if (max != null) {
- pds.setMaxConnections(Integer.parseInt(max));
+
+ private Object loadConnectionPool(Reference ref) {
+ PGConnectionPoolDataSource cp = new PGConnectionPoolDataSource();
+ return loadBaseDataSource(cp, ref);
}
- return pds;
- }
- private Object loadSimpleDataSource(Reference ref) {
- PGSimpleDataSource ds = new PGSimpleDataSource();
- return loadBaseDataSource(ds, ref);
- }
+ protected Object loadBaseDataSource(BaseDataSource ds, Reference ref) {
+ ds.setFromReference(ref);
- private Object loadConnectionPool(Reference ref) {
- PGConnectionPoolDataSource cp = new PGConnectionPoolDataSource();
- return loadBaseDataSource(cp, ref);
- }
-
- protected Object loadBaseDataSource(BaseDataSource ds, Reference ref) {
- ds.setFromReference(ref);
-
- return ds;
- }
-
- protected String getProperty(Reference ref, String s) {
- RefAddr addr = ref.get(s);
- if (addr == null) {
- return null;
+ return ds;
+ }
+
+ protected String getProperty(Reference ref, String s) {
+ RefAddr addr = ref.get(s);
+ if (addr == null) {
+ return null;
+ }
+ return (String) addr.getContent();
}
- return (String) addr.getContent();
- }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/fastpath/Fastpath.java b/pgjdbc/src/main/java/org/postgresql/fastpath/Fastpath.java
index 92a8028..e474c2f 100644
--- a/pgjdbc/src/main/java/org/postgresql/fastpath/Fastpath.java
+++ b/pgjdbc/src/main/java/org/postgresql/fastpath/Fastpath.java
@@ -27,295 +27,295 @@ import java.util.logging.Level;
 * <p>It is based around the file src/interfaces/libpq/fe-exec.c
*
* @deprecated This API is somewhat obsolete, as one may achieve similar performance
- * and greater functionality by setting up a prepared statement to define
- * the function call. Then, executing the statement with binary transmission of parameters
- * and results substitutes for a fast-path function call.
+ * and greater functionality by setting up a prepared statement to define
+ * the function call. Then, executing the statement with binary transmission of parameters
+ * and results substitutes for a fast-path function call.
*/
@Deprecated
public class Fastpath {
- // Java passes oids around as longs, but in the backend
- // it's an unsigned int, so we use this to make the conversion
- // of long -> signed int which the backend interprets as unsigned.
- private static final long NUM_OIDS = 4294967296L; // 2^32
+ // Java passes oids around as longs, but in the backend
+ // it's an unsigned int, so we use this to make the conversion
+ // of long -> signed int which the backend interprets as unsigned.
+ private static final long NUM_OIDS = 4294967296L; // 2^32
- // This maps the functions names to their id's (possible unique just
- // to a connection).
- private final Map<String, Integer> func = new HashMap<>();
- private final QueryExecutor executor;
- private final BaseConnection connection;
+ // This maps the functions names to their id's (possible unique just
+ // to a connection).
+ private final Map<String, Integer> func = new HashMap<>();
+ private final QueryExecutor executor;
+ private final BaseConnection connection;
- /**
- * Initialises the fastpath system.
- *
- * @param conn BaseConnection to attach to
- */
- public Fastpath(BaseConnection conn) {
- this.connection = conn;
- this.executor = conn.getQueryExecutor();
- }
-
- /**
- * Send a function call to the PostgreSQL backend.
- *
- * @param fnId Function id
- * @param resultType True if the result is a numeric (Integer or Long)
- * @param args FastpathArguments to pass to fastpath
- * @return null if no data, Integer if an integer result, Long if a long result, or byte[]
- * otherwise
- * @throws SQLException if a database-access error occurs.
- * @deprecated please use {@link #fastpath(int, FastpathArg[])}
- */
- @Deprecated
- public Object fastpath(int fnId, boolean resultType, FastpathArg[] args)
- throws SQLException {
- // Run it.
- byte[] returnValue = fastpath(fnId, args);
-
- // Interpret results.
- if (!resultType || returnValue == null) {
- return returnValue;
+ /**
+ * Initialises the fastpath system.
+ *
+ * @param conn BaseConnection to attach to
+ */
+ public Fastpath(BaseConnection conn) {
+ this.connection = conn;
+ this.executor = conn.getQueryExecutor();
}
- if (returnValue.length == 4) {
- return ByteConverter.int4(returnValue, 0);
- } else if (returnValue.length == 8) {
- return ByteConverter.int8(returnValue, 0);
- } else {
- throw new PSQLException(
- GT.tr("Fastpath call {0} - No result was returned and we expected a numeric.", fnId),
- PSQLState.NO_DATA);
- }
- }
-
- /**
- * Send a function call to the PostgreSQL backend.
- *
- * @param fnId Function id
- * @param args FastpathArguments to pass to fastpath
- * @return null if no data, byte[] otherwise
- * @throws SQLException if a database-access error occurs.
- */
- public byte [] fastpath(int fnId, FastpathArg[] args) throws SQLException {
- // Turn fastpath array into a parameter list.
- ParameterList params = executor.createFastpathParameters(args.length);
- for (int i = 0; i < args.length; i++) {
- args[i].populateParameter(params, i + 1);
+ /**
+ * Creates a FastpathArg with an oid parameter. This is here instead of a constructor of
+ * FastpathArg because the constructor can't tell the difference between an long that's really
+ * int8 and a long thats an oid.
+ *
+ * @param oid input oid
+ * @return FastpathArg with an oid parameter
+ */
+ public static FastpathArg createOIDArg(long oid) {
+ if (oid > Integer.MAX_VALUE) {
+ oid -= NUM_OIDS;
+ }
+ return new FastpathArg((int) oid);
}
- // Run it.
- return executor.fastpathCall(fnId, params, connection.getAutoCommit());
- }
+ /**
+ * Send a function call to the PostgreSQL backend.
+ *
+ * @param fnId Function id
+ * @param resultType True if the result is a numeric (Integer or Long)
+ * @param args FastpathArguments to pass to fastpath
+ * @return null if no data, Integer if an integer result, Long if a long result, or byte[]
+ * otherwise
+ * @throws SQLException if a database-access error occurs.
+ * @deprecated please use {@link #fastpath(int, FastpathArg[])}
+ */
+ @Deprecated
+ public Object fastpath(int fnId, boolean resultType, FastpathArg[] args)
+ throws SQLException {
+ // Run it.
+ byte[] returnValue = fastpath(fnId, args);
- /**
- * @param name Function name
- * @param resulttype True if the result is a numeric (Integer or Long)
- * @param args FastpathArguments to pass to fastpath
- * @return null if no data, Integer if an integer result, Long if a long result, or byte[]
- * otherwise
- * @throws SQLException if something goes wrong
- * @see #fastpath(int, FastpathArg[])
- * @see #fastpath(String, FastpathArg[])
- * @deprecated Use {@link #getData(String, FastpathArg[])} if you expect a binary result, or one
- * of {@link #getInteger(String, FastpathArg[])} or
- * {@link #getLong(String, FastpathArg[])} if you expect a numeric one
- */
- @Deprecated
- public Object fastpath(String name, boolean resulttype, FastpathArg[] args)
- throws SQLException {
- connection.getLogger().log(Level.FINEST, "Fastpath: calling {0}", name);
- return fastpath(getID(name), resulttype, args);
- }
+ // Interpret results.
+ if (!resultType || returnValue == null) {
+ return returnValue;
+ }
- /**
- * <p>Send a function call to the PostgreSQL backend by name.
- *
- * <p>Note: the mapping for the procedure name to function id needs to exist, usually to an earlier
- * call to addfunction().
- *
- * <p>This is the preferred method to call, as function id's can/may change between versions of the
- * backend.
- *
- * <p>For an example of how this works, refer to org.postgresql.largeobject.LargeObject
- *
- * @param name Function name
- * @param args FastpathArguments to pass to fastpath
- * @return null if no data, byte[] otherwise
- * @throws SQLException if name is unknown or if a database-access error occurs.
- * @see org.postgresql.largeobject.LargeObject
- */
- public byte [] fastpath(String name, FastpathArg[] args) throws SQLException {
- connection.getLogger().log(Level.FINEST, "Fastpath: calling {0}", name);
- return fastpath(getID(name), args);
- }
-
- /**
- * This convenience method assumes that the return value is an integer.
- *
- * @param name Function name
- * @param args Function arguments
- * @return integer result
- * @throws SQLException if a database-access error occurs or no result
- */
- public int getInteger(String name, FastpathArg[] args) throws SQLException {
- byte[] returnValue = fastpath(name, args);
- if (returnValue == null) {
- throw new PSQLException(
- GT.tr("Fastpath call {0} - No result was returned and we expected an integer.", name),
- PSQLState.NO_DATA);
+ if (returnValue.length == 4) {
+ return ByteConverter.int4(returnValue, 0);
+ } else if (returnValue.length == 8) {
+ return ByteConverter.int8(returnValue, 0);
+ } else {
+ throw new PSQLException(
+ GT.tr("Fastpath call {0} - No result was returned and we expected a numeric.", fnId),
+ PSQLState.NO_DATA);
+ }
}
- if (returnValue.length == 4) {
- return ByteConverter.int4(returnValue, 0);
- } else {
- throw new PSQLException(GT.tr(
- "Fastpath call {0} - No result was returned or wrong size while expecting an integer.",
- name), PSQLState.NO_DATA);
- }
- }
+ /**
+ * Send a function call to the PostgreSQL backend.
+ *
+ * @param fnId Function id
+ * @param args FastpathArguments to pass to fastpath
+ * @return null if no data, byte[] otherwise
+ * @throws SQLException if a database-access error occurs.
+ */
+ public byte[] fastpath(int fnId, FastpathArg[] args) throws SQLException {
+ // Turn fastpath array into a parameter list.
+ ParameterList params = executor.createFastpathParameters(args.length);
+ for (int i = 0; i < args.length; i++) {
+ args[i].populateParameter(params, i + 1);
+ }
- /**
- * This convenience method assumes that the return value is a long (bigint).
- *
- * @param name Function name
- * @param args Function arguments
- * @return long result
- * @throws SQLException if a database-access error occurs or no result
- */
- public long getLong(String name, FastpathArg[] args) throws SQLException {
- byte[] returnValue = fastpath(name, args);
- if (returnValue == null) {
- throw new PSQLException(
- GT.tr("Fastpath call {0} - No result was returned and we expected a long.", name),
- PSQLState.NO_DATA);
- }
- if (returnValue.length == 8) {
- return ByteConverter.int8(returnValue, 0);
-
- } else {
- throw new PSQLException(
- GT.tr("Fastpath call {0} - No result was returned or wrong size while expecting a long.",
- name),
- PSQLState.NO_DATA);
- }
- }
-
- /**
- * This convenience method assumes that the return value is an oid.
- *
- * @param name Function name
- * @param args Function arguments
- * @return oid of the given call
- * @throws SQLException if a database-access error occurs or no result
- */
- public long getOID(String name, FastpathArg[] args) throws SQLException {
- long oid = getInteger(name, args);
- if (oid < 0) {
- oid += NUM_OIDS;
- }
- return oid;
- }
-
- /**
- * This convenience method assumes that the return value is not an Integer.
- *
- * @param name Function name
- * @param args Function arguments
- * @return byte[] array containing result
- * @throws SQLException if a database-access error occurs or no result
- */
- public byte [] getData(String name, FastpathArg[] args) throws SQLException {
- return fastpath(name, args);
- }
-
- /**
- * <p>This adds a function to our lookup table.
- *
- * <p>User code should use the addFunctions method, which is based upon a query, rather than hard
- * coding the oid. The oid for a function is not guaranteed to remain static, even on different
- * servers of the same version.
- *
- * @param name Function name
- * @param fnid Function id
- */
- public void addFunction(String name, int fnid) {
- func.put(name, fnid);
- }
-
- /**
- * <p>This takes a ResultSet containing two columns. Column 1 contains the function name, Column 2
- * the oid.
- *
- * <p>It reads the entire ResultSet, loading the values into the function table.
- *
- * <p>REMEMBER to close() the resultset after calling this!!
- *
- * <p>Implementation note about function name lookups:
- *
- * <p>PostgreSQL stores the function id's and their corresponding names in the pg_proc table. To
- * speed things up locally, instead of querying each function from that table when required, a
- * HashMap is used. Also, only the function's required are entered into this table, keeping
- * connection times as fast as possible.
- *
- * <p>The org.postgresql.largeobject.LargeObject class performs a query upon it's startup, and passes
- * the returned ResultSet to the addFunctions() method here.
- *
- * <p>Once this has been done, the LargeObject api refers to the functions by name.
- *
- * <p>Don't think that manually converting them to the oid's will work. Ok, they will for now, but
- * they can change during development (there was some discussion about this for V7.0), so this is
- * implemented to prevent any unwarranted headaches in the future.
This returns the function id associated by its name.
- *
- *
If addFunction() or addFunctions() have not been called for this name, then an SQLException is
- * thrown.
- *
- * @param name Function name to lookup
- * @return Function ID for fastpath call
- * @throws SQLException is function is unknown.
- */
- public int getID(String name) throws SQLException {
- Integer id = func.get(name);
-
- // may be we could add a lookup to the database here, and store the result
- // in our lookup table, throwing the exception if that fails.
- // We must, however, ensure that if we do, any existing ResultSet is
- // unaffected, otherwise we could break user code.
- //
- // so, until we know we can do this (needs testing, on the TODO list)
- // for now, we throw the exception and do no lookups.
- if (id == null) {
- throw new PSQLException(GT.tr("The fastpath function {0} is unknown.", name),
- PSQLState.UNEXPECTED_ERROR);
+ // Run it.
+ return executor.fastpathCall(fnId, params, connection.getAutoCommit());
}
- return id;
- }
-
- /**
- * Creates a FastpathArg with an oid parameter. This is here instead of a constructor of
- * FastpathArg because the constructor can't tell the difference between an long that's really
- * int8 and a long thats an oid.
- *
- * @param oid input oid
- * @return FastpathArg with an oid parameter
- */
- public static FastpathArg createOIDArg(long oid) {
- if (oid > Integer.MAX_VALUE) {
- oid -= NUM_OIDS;
+ /**
+ * @param name Function name
+ * @param resulttype True if the result is a numeric (Integer or Long)
+ * @param args FastpathArguments to pass to fastpath
+ * @return null if no data, Integer if an integer result, Long if a long result, or byte[]
+ * otherwise
+ * @throws SQLException if something goes wrong
+ * @see #fastpath(int, FastpathArg[])
+ * @see #fastpath(String, FastpathArg[])
+ * @deprecated Use {@link #getData(String, FastpathArg[])} if you expect a binary result, or one
+ * of {@link #getInteger(String, FastpathArg[])} or
+ * {@link #getLong(String, FastpathArg[])} if you expect a numeric one
+ */
+ @Deprecated
+ public Object fastpath(String name, boolean resulttype, FastpathArg[] args)
+ throws SQLException {
+ connection.getLogger().log(Level.FINEST, "Fastpath: calling {0}", name);
+ return fastpath(getID(name), resulttype, args);
+ }
+
+ /**
+ * <p>Send a function call to the PostgreSQL backend by name.
+ *
+ * <p>Note: the mapping for the procedure name to function id needs to exist, usually to an earlier
+ * call to addfunction().
+ *
+ * <p>This is the preferred method to call, as function id's can/may change between versions of the
+ * backend.
+ *
+ * <p>For an example of how this works, refer to org.postgresql.largeobject.LargeObject
+ *
+ * @param name Function name
+ * @param args FastpathArguments to pass to fastpath
+ * @return null if no data, byte[] otherwise
+ * @throws SQLException if name is unknown or if a database-access error occurs.
+ * @see org.postgresql.largeobject.LargeObject
+ */
+ public byte[] fastpath(String name, FastpathArg[] args) throws SQLException {
+ connection.getLogger().log(Level.FINEST, "Fastpath: calling {0}", name);
+ return fastpath(getID(name), args);
+ }
+
+ /**
+ * This convenience method assumes that the return value is an integer.
+ *
+ * @param name Function name
+ * @param args Function arguments
+ * @return integer result
+ * @throws SQLException if a database-access error occurs or no result
+ */
+ public int getInteger(String name, FastpathArg[] args) throws SQLException {
+ byte[] returnValue = fastpath(name, args);
+ if (returnValue == null) {
+ throw new PSQLException(
+ GT.tr("Fastpath call {0} - No result was returned and we expected an integer.", name),
+ PSQLState.NO_DATA);
+ }
+
+ if (returnValue.length == 4) {
+ return ByteConverter.int4(returnValue, 0);
+ } else {
+ throw new PSQLException(GT.tr(
+ "Fastpath call {0} - No result was returned or wrong size while expecting an integer.",
+ name), PSQLState.NO_DATA);
+ }
+ }
+
+ /**
+ * This convenience method assumes that the return value is a long (bigint).
+ *
+ * @param name Function name
+ * @param args Function arguments
+ * @return long result
+ * @throws SQLException if a database-access error occurs or no result
+ */
+ public long getLong(String name, FastpathArg[] args) throws SQLException {
+ byte[] returnValue = fastpath(name, args);
+ if (returnValue == null) {
+ throw new PSQLException(
+ GT.tr("Fastpath call {0} - No result was returned and we expected a long.", name),
+ PSQLState.NO_DATA);
+ }
+ if (returnValue.length == 8) {
+ return ByteConverter.int8(returnValue, 0);
+
+ } else {
+ throw new PSQLException(
+ GT.tr("Fastpath call {0} - No result was returned or wrong size while expecting a long.",
+ name),
+ PSQLState.NO_DATA);
+ }
+ }
+
+ /**
+ * This convenience method assumes that the return value is an oid.
+ *
+ * @param name Function name
+ * @param args Function arguments
+ * @return oid of the given call
+ * @throws SQLException if a database-access error occurs or no result
+ */
+ public long getOID(String name, FastpathArg[] args) throws SQLException {
+ long oid = getInteger(name, args);
+ if (oid < 0) {
+ oid += NUM_OIDS;
+ }
+ return oid;
+ }
+
+ /**
+ * This convenience method assumes that the return value is not an Integer.
+ *
+ * @param name Function name
+ * @param args Function arguments
+ * @return byte[] array containing result
+ * @throws SQLException if a database-access error occurs or no result
+ */
+ public byte[] getData(String name, FastpathArg[] args) throws SQLException {
+ return fastpath(name, args);
+ }
+
+  /**
+   * <p>This adds a function to our lookup table.</p>
+   *
+   * <p>User code should use the addFunctions method, which is based upon a query, rather than hard
+   * coding the oid. The oid for a function is not guaranteed to remain static, even on different
+   * servers of the same version.</p>
+   *
+ * @param name Function name
+ * @param fnid Function id
+ */
+ public void addFunction(String name, int fnid) {
+ func.put(name, fnid);
+ }
+
+  /**
+   * <p>This takes a ResultSet containing two columns. Column 1 contains the function name, Column 2
+   * the oid.</p>
+   *
+   * <p>It reads the entire ResultSet, loading the values into the function table.</p>
+   *
+   * <p>REMEMBER to close() the resultset after calling this!!</p>
+   *
+   * <p>Implementation note about function name lookups:</p>
+   *
+   * <p>PostgreSQL stores the function id's and their corresponding names in the pg_proc table. To
+   * speed things up locally, instead of querying each function from that table when required, a
+   * HashMap is used. Also, only the functions required are entered into this table, keeping
+   * connection times as fast as possible.</p>
+   *
+   * <p>The org.postgresql.largeobject.LargeObject class performs a query upon its startup, and passes
+   * the returned ResultSet to the addFunctions() method here.</p>
+   *
+   * <p>Once this has been done, the LargeObject api refers to the functions by name.</p>
+   *
+   * <p>Don't think that manually converting them to the oid's will work. Ok, they will for now, but
+   * they can change during development (there was some discussion about this for V7.0), so this is
+   * implemented to prevent any unwarranted headaches in the future.</p>
+   *
+   * @param rs ResultSet
+   * @throws SQLException if a database-access error occurs.
+   * @see org.postgresql.largeobject.LargeObjectManager
+   */
+  public void addFunctions(ResultSet rs) throws SQLException {
+    while (rs.next()) {
+      func.put(rs.getString(1), rs.getInt(2));
+    }
+  }
+
+  /**
+   * <p>This returns the function id associated by its name.</p>
+   *
+   * <p>If addFunction() or addFunctions() have not been called for this name, then an SQLException is
+   * thrown.</p>
+   *
+   * @param name Function name to lookup
+   * @return Function ID for fastpath call
+   * @throws SQLException if function is unknown.
+   */
+ public int getID(String name) throws SQLException {
+ Integer id = func.get(name);
+
+    // Maybe we could add a lookup to the database here, and store the result
+ // in our lookup table, throwing the exception if that fails.
+ // We must, however, ensure that if we do, any existing ResultSet is
+ // unaffected, otherwise we could break user code.
+ //
+ // so, until we know we can do this (needs testing, on the TODO list)
+ // for now, we throw the exception and do no lookups.
+ if (id == null) {
+ throw new PSQLException(GT.tr("The fastpath function {0} is unknown.", name),
+ PSQLState.UNEXPECTED_ERROR);
+ }
+
+ return id;
}
- return new FastpathArg((int) oid);
- }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/fastpath/FastpathArg.java b/pgjdbc/src/main/java/org/postgresql/fastpath/FastpathArg.java
index a739a29..1fab968 100644
--- a/pgjdbc/src/main/java/org/postgresql/fastpath/FastpathArg.java
+++ b/pgjdbc/src/main/java/org/postgresql/fastpath/FastpathArg.java
@@ -20,107 +20,107 @@ import java.sql.SQLException;
* being called.
*
* @deprecated This API is somewhat obsolete, as one may achieve similar performance
- * and greater functionality by setting up a prepared statement to define
- * the function call. Then, executing the statement with binary transmission of parameters
- * and results substitutes for a fast-path function call.
+ * and greater functionality by setting up a prepared statement to define
+ * the function call. Then, executing the statement with binary transmission of parameters
+ * and results substitutes for a fast-path function call.
*/
@Deprecated
public class FastpathArg {
- /**
- * Encoded byte value of argument.
- */
- private final byte [] bytes;
- private final int bytesStart;
- private final int bytesLength;
+ /**
+ * Encoded byte value of argument.
+ */
+ private final byte[] bytes;
+ private final int bytesStart;
+ private final int bytesLength;
- static class ByteStreamWriterFastpathArg extends FastpathArg {
- private final ByteStreamWriter writer;
-
- ByteStreamWriterFastpathArg(ByteStreamWriter writer) {
- super(null, 0, 0);
- this.writer = writer;
+ /**
+ * Constructs an argument that consists of an integer value.
+ *
+ * @param value int value to set
+ */
+ public FastpathArg(int value) {
+ bytes = new byte[4];
+ bytes[3] = (byte) (value);
+ bytes[2] = (byte) (value >> 8);
+ bytes[1] = (byte) (value >> 16);
+ bytes[0] = (byte) (value >> 24);
+ bytesStart = 0;
+ bytesLength = 4;
+ }
+
+ /**
+ * Constructs an argument that consists of an integer value.
+ *
+ * @param value int value to set
+ */
+ public FastpathArg(long value) {
+ bytes = new byte[8];
+ bytes[7] = (byte) (value);
+ bytes[6] = (byte) (value >> 8);
+ bytes[5] = (byte) (value >> 16);
+ bytes[4] = (byte) (value >> 24);
+ bytes[3] = (byte) (value >> 32);
+ bytes[2] = (byte) (value >> 40);
+ bytes[1] = (byte) (value >> 48);
+ bytes[0] = (byte) (value >> 56);
+ bytesStart = 0;
+ bytesLength = 8;
+ }
+
+ /**
+ * Constructs an argument that consists of an array of bytes.
+ *
+ * @param bytes array to store
+ */
+ public FastpathArg(byte[] bytes) {
+ this(bytes, 0, bytes.length);
+ }
+
+ /**
+ * Constructs an argument that consists of part of a byte array.
+ *
+ * @param buf source array
+ * @param off offset within array
+ * @param len length of data to include
+ */
+ public FastpathArg(byte[] buf, int off, int len) {
+ this.bytes = buf;
+ this.bytesStart = off;
+ this.bytesLength = len;
+ }
+
+ /**
+ * Constructs an argument that consists of a String.
+ *
+ * @param s String to store
+ */
+ public FastpathArg(String s) {
+ this(s.getBytes());
+ }
+
+ public static FastpathArg of(ByteStreamWriter writer) {
+ return new ByteStreamWriterFastpathArg(writer);
}
- @Override
void populateParameter(ParameterList params, int index) throws SQLException {
- params.setBytea(index, writer);
+ if (bytes == null) {
+ params.setNull(index, 0);
+ } else {
+ params.setBytea(index, bytes, bytesStart, bytesLength);
+ }
}
- }
- /**
- * Constructs an argument that consists of an integer value.
- *
- * @param value int value to set
- */
- public FastpathArg(int value) {
- bytes = new byte[4];
- bytes[3] = (byte) (value);
- bytes[2] = (byte) (value >> 8);
- bytes[1] = (byte) (value >> 16);
- bytes[0] = (byte) (value >> 24);
- bytesStart = 0;
- bytesLength = 4;
- }
+ static class ByteStreamWriterFastpathArg extends FastpathArg {
+ private final ByteStreamWriter writer;
- /**
- * Constructs an argument that consists of an integer value.
- *
- * @param value int value to set
- */
- public FastpathArg(long value) {
- bytes = new byte[8];
- bytes[7] = (byte) (value);
- bytes[6] = (byte) (value >> 8);
- bytes[5] = (byte) (value >> 16);
- bytes[4] = (byte) (value >> 24);
- bytes[3] = (byte) (value >> 32);
- bytes[2] = (byte) (value >> 40);
- bytes[1] = (byte) (value >> 48);
- bytes[0] = (byte) (value >> 56);
- bytesStart = 0;
- bytesLength = 8;
- }
+ ByteStreamWriterFastpathArg(ByteStreamWriter writer) {
+ super(null, 0, 0);
+ this.writer = writer;
+ }
- /**
- * Constructs an argument that consists of an array of bytes.
- *
- * @param bytes array to store
- */
- public FastpathArg(byte[] bytes) {
- this(bytes, 0, bytes.length);
- }
-
- /**
- * Constructs an argument that consists of part of a byte array.
- *
- * @param buf source array
- * @param off offset within array
- * @param len length of data to include
- */
- public FastpathArg(byte [] buf, int off, int len) {
- this.bytes = buf;
- this.bytesStart = off;
- this.bytesLength = len;
- }
-
- /**
- * Constructs an argument that consists of a String.
- *
- * @param s String to store
- */
- public FastpathArg(String s) {
- this(s.getBytes());
- }
-
- public static FastpathArg of(ByteStreamWriter writer) {
- return new ByteStreamWriterFastpathArg(writer);
- }
-
- void populateParameter(ParameterList params, int index) throws SQLException {
- if (bytes == null) {
- params.setNull(index, 0);
- } else {
- params.setBytea(index, bytes, bytesStart, bytesLength);
+ @Override
+ void populateParameter(ParameterList params, int index) throws SQLException {
+ params.setBytea(index, writer);
+ }
}
- }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/geometric/PGbox.java b/pgjdbc/src/main/java/org/postgresql/geometric/PGbox.java
index 7127a41..54b393c 100644
--- a/pgjdbc/src/main/java/org/postgresql/geometric/PGbox.java
+++ b/pgjdbc/src/main/java/org/postgresql/geometric/PGbox.java
@@ -20,181 +20,181 @@ import java.sql.SQLException;
*/
@SuppressWarnings("serial")
public class PGbox extends PGobject implements PGBinaryObject, Serializable, Cloneable {
- /**
- * These are the two points.
- */
- public PGpoint [] point;
+ /**
+ * These are the two points.
+ */
+ public PGpoint[] point;
- /**
- * @param x1 first x coordinate
- * @param y1 first y coordinate
- * @param x2 second x coordinate
- * @param y2 second y coordinate
- */
- public PGbox(double x1, double y1, double x2, double y2) {
- this(new PGpoint(x1, y1), new PGpoint(x2, y2));
- }
-
- /**
- * @param p1 first point
- * @param p2 second point
- */
- public PGbox(PGpoint p1, PGpoint p2) {
- this();
- this.point = new PGpoint[]{p1, p2};
- }
-
- /**
- * @param s Box definition in PostgreSQL syntax
- * @throws SQLException if definition is invalid
- */
- public PGbox(String s) throws SQLException {
- this();
- setValue(s);
- }
-
- /**
- * Required constructor.
- */
- public PGbox() {
- type = "box";
- }
-
- /**
- * This method sets the value of this object. It should be overridden, but still called by
- * subclasses.
- *
- * @param value a string representation of the value of the object
- * @throws SQLException thrown if value is invalid for this type
- */
- @Override
- public void setValue(String value) throws SQLException {
- if (value == null) {
- this.point = null;
- return;
- }
- PGtokenizer t = new PGtokenizer(value, ',');
- if (t.getSize() != 2) {
- throw new PSQLException(
- GT.tr("Conversion to type {0} failed: {1}.", type, value),
- PSQLState.DATA_TYPE_MISMATCH);
+ /**
+ * @param x1 first x coordinate
+ * @param y1 first y coordinate
+ * @param x2 second x coordinate
+ * @param y2 second y coordinate
+ */
+ public PGbox(double x1, double y1, double x2, double y2) {
+ this(new PGpoint(x1, y1), new PGpoint(x2, y2));
}
- PGpoint[] point = this.point;
- if (point == null) {
- this.point = point = new PGpoint[2];
- }
- point[0] = new PGpoint(t.getToken(0));
- point[1] = new PGpoint(t.getToken(1));
- }
-
- /**
- * @param b Definition of this point in PostgreSQL's binary syntax
- */
- @Override
- public void setByteValue(byte[] b, int offset) {
- PGpoint[] point = this.point;
- if (point == null) {
- this.point = point = new PGpoint[2];
- }
- point[0] = new PGpoint();
- point[0].setByteValue(b, offset);
- point[1] = new PGpoint();
- point[1].setByteValue(b, offset + point[0].lengthInBytes());
- this.point = point;
- }
-
- /**
- * @param obj Object to compare with
- * @return true if the two boxes are identical
- */
- @Override
- public boolean equals(Object obj) {
- if (obj instanceof PGbox) {
- PGbox p = (PGbox) obj;
-
- // Same points.
- PGpoint[] point = this.point;
- PGpoint[] pPoint = p.point;
- if (point == null) {
- return pPoint == null;
- } else if (pPoint == null) {
- return false;
- }
-
- if (pPoint[0].equals(point[0]) && pPoint[1].equals(point[1])) {
- return true;
- }
-
- // Points swapped.
- if (pPoint[0].equals(point[1]) && pPoint[1].equals(point[0])) {
- return true;
- }
-
- // Using the opposite two points of the box:
- // (x1,y1),(x2,y2) -> (x1,y2),(x2,y1)
- if (pPoint[0].x == point[0].x && pPoint[0].y == point[1].y
- && pPoint[1].x == point[1].x && pPoint[1].y == point[0].y) {
- return true;
- }
-
- // Using the opposite two points of the box, and the points are swapped
- // (x1,y1),(x2,y2) -> (x2,y1),(x1,y2)
- if (pPoint[0].x == point[1].x && pPoint[0].y == point[0].y
- && pPoint[1].x == point[0].x && pPoint[1].y == point[1].y) {
- return true;
- }
+ /**
+ * @param p1 first point
+ * @param p2 second point
+ */
+ public PGbox(PGpoint p1, PGpoint p2) {
+ this();
+ this.point = new PGpoint[]{p1, p2};
}
- return false;
- }
+ /**
+ * @param s Box definition in PostgreSQL syntax
+ * @throws SQLException if definition is invalid
+ */
+ public PGbox(String s) throws SQLException {
+ this();
+ setValue(s);
+ }
- @Override
- public int hashCode() {
- // This relies on the behaviour of point's hashcode being an exclusive-OR of
- // its X and Y components; we end up with an exclusive-OR of the two X and
- // two Y components, which is equal whenever equals() would return true
- // since xor is commutative.
- PGpoint[] point = this.point;
- return point == null ? 0 : point[0].hashCode() ^ point[1].hashCode();
- }
+ /**
+ * Required constructor.
+ */
+ public PGbox() {
+ type = "box";
+ }
- @Override
- public Object clone() throws CloneNotSupportedException {
- PGbox newPGbox = (PGbox) super.clone();
- if (newPGbox.point != null) {
- newPGbox.point = newPGbox.point.clone();
- for (int i = 0; i < newPGbox.point.length; i++) {
- if (newPGbox.point[i] != null) {
- newPGbox.point[i] = (PGpoint) newPGbox.point[i].clone();
+ /**
+ * @param b Definition of this point in PostgreSQL's binary syntax
+ */
+ @Override
+ public void setByteValue(byte[] b, int offset) {
+ PGpoint[] point = this.point;
+ if (point == null) {
+ this.point = point = new PGpoint[2];
}
- }
+ point[0] = new PGpoint();
+ point[0].setByteValue(b, offset);
+ point[1] = new PGpoint();
+ point[1].setByteValue(b, offset + point[0].lengthInBytes());
+ this.point = point;
}
- return newPGbox;
- }
- /**
- * @return the PGbox in the syntax expected by org.postgresql
- */
- @Override
- public String getValue() {
- PGpoint[] point = this.point;
- return point == null ? null : point[0].toString() + "," + point[1].toString();
- }
+ /**
+ * @param obj Object to compare with
+ * @return true if the two boxes are identical
+ */
+ @Override
+ public boolean equals(Object obj) {
+ if (obj instanceof PGbox) {
+ PGbox p = (PGbox) obj;
- @Override
- public int lengthInBytes() {
- PGpoint[] point = this.point;
- if (point == null) {
- return 0;
+ // Same points.
+ PGpoint[] point = this.point;
+ PGpoint[] pPoint = p.point;
+ if (point == null) {
+ return pPoint == null;
+ } else if (pPoint == null) {
+ return false;
+ }
+
+ if (pPoint[0].equals(point[0]) && pPoint[1].equals(point[1])) {
+ return true;
+ }
+
+ // Points swapped.
+ if (pPoint[0].equals(point[1]) && pPoint[1].equals(point[0])) {
+ return true;
+ }
+
+ // Using the opposite two points of the box:
+ // (x1,y1),(x2,y2) -> (x1,y2),(x2,y1)
+ if (pPoint[0].x == point[0].x && pPoint[0].y == point[1].y
+ && pPoint[1].x == point[1].x && pPoint[1].y == point[0].y) {
+ return true;
+ }
+
+ // Using the opposite two points of the box, and the points are swapped
+ // (x1,y1),(x2,y2) -> (x2,y1),(x1,y2)
+ if (pPoint[0].x == point[1].x && pPoint[0].y == point[0].y
+ && pPoint[1].x == point[0].x && pPoint[1].y == point[1].y) {
+ return true;
+ }
+ }
+
+ return false;
}
- return point[0].lengthInBytes() + point[1].lengthInBytes();
- }
- @Override
- public void toBytes(byte[] bytes, int offset) {
- PGpoint[] point = this.point;
- point[0].toBytes(bytes, offset);
- point[1].toBytes(bytes, offset + point[0].lengthInBytes());
- }
+ @Override
+ public int hashCode() {
+ // This relies on the behaviour of point's hashcode being an exclusive-OR of
+ // its X and Y components; we end up with an exclusive-OR of the two X and
+ // two Y components, which is equal whenever equals() would return true
+ // since xor is commutative.
+ PGpoint[] point = this.point;
+ return point == null ? 0 : point[0].hashCode() ^ point[1].hashCode();
+ }
+
+ @Override
+ public Object clone() throws CloneNotSupportedException {
+ PGbox newPGbox = (PGbox) super.clone();
+ if (newPGbox.point != null) {
+ newPGbox.point = newPGbox.point.clone();
+ for (int i = 0; i < newPGbox.point.length; i++) {
+ if (newPGbox.point[i] != null) {
+ newPGbox.point[i] = (PGpoint) newPGbox.point[i].clone();
+ }
+ }
+ }
+ return newPGbox;
+ }
+
+ /**
+ * @return the PGbox in the syntax expected by org.postgresql
+ */
+ @Override
+ public String getValue() {
+ PGpoint[] point = this.point;
+ return point == null ? null : point[0].toString() + "," + point[1].toString();
+ }
+
+ /**
+ * This method sets the value of this object. It should be overridden, but still called by
+ * subclasses.
+ *
+ * @param value a string representation of the value of the object
+ * @throws SQLException thrown if value is invalid for this type
+ */
+ @Override
+ public void setValue(String value) throws SQLException {
+ if (value == null) {
+ this.point = null;
+ return;
+ }
+ PGtokenizer t = new PGtokenizer(value, ',');
+ if (t.getSize() != 2) {
+ throw new PSQLException(
+ GT.tr("Conversion to type {0} failed: {1}.", type, value),
+ PSQLState.DATA_TYPE_MISMATCH);
+ }
+
+ PGpoint[] point = this.point;
+ if (point == null) {
+ this.point = point = new PGpoint[2];
+ }
+ point[0] = new PGpoint(t.getToken(0));
+ point[1] = new PGpoint(t.getToken(1));
+ }
+
+ @Override
+ public int lengthInBytes() {
+ PGpoint[] point = this.point;
+ if (point == null) {
+ return 0;
+ }
+ return point[0].lengthInBytes() + point[1].lengthInBytes();
+ }
+
+ @Override
+ public void toBytes(byte[] bytes, int offset) {
+ PGpoint[] point = this.point;
+ point[0].toBytes(bytes, offset);
+ point[1].toBytes(bytes, offset + point[0].lengthInBytes());
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/geometric/PGcircle.java b/pgjdbc/src/main/java/org/postgresql/geometric/PGcircle.java
index 995023a..ddbced8 100644
--- a/pgjdbc/src/main/java/org/postgresql/geometric/PGcircle.java
+++ b/pgjdbc/src/main/java/org/postgresql/geometric/PGcircle.java
@@ -19,122 +19,122 @@ import java.sql.SQLException;
*/
@SuppressWarnings("serial")
public class PGcircle extends PGobject implements Serializable, Cloneable {
- /**
- * This is the center point.
- */
- public PGpoint center;
+ /**
+ * This is the center point.
+ */
+ public PGpoint center;
- /**
- * This is the radius.
- */
- public double radius;
+ /**
+ * This is the radius.
+ */
+ public double radius;
- /**
- * @param x coordinate of center
- * @param y coordinate of center
- * @param r radius of circle
- */
- public PGcircle(double x, double y, double r) {
- this(new PGpoint(x, y), r);
- }
-
- /**
- * @param c PGpoint describing the circle's center
- * @param r radius of circle
- */
- public PGcircle(PGpoint c, double r) {
- this();
- this.center = c;
- this.radius = r;
- }
-
- /**
- * @param s definition of the circle in PostgreSQL's syntax.
- * @throws SQLException on conversion failure
- */
- public PGcircle(String s) throws SQLException {
- this();
- setValue(s);
- }
-
- /**
- * This constructor is used by the driver.
- */
- public PGcircle() {
- type = "circle";
- }
-
- /**
- * @param s definition of the circle in PostgreSQL's syntax.
- * @throws SQLException on conversion failure
- */
- @Override
- public void setValue(String s) throws SQLException {
- if (s == null) {
- center = null;
- return;
- }
- PGtokenizer t = new PGtokenizer(PGtokenizer.removeAngle(s), ',');
- if (t.getSize() != 2) {
- throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
- PSQLState.DATA_TYPE_MISMATCH);
+ /**
+ * @param x coordinate of center
+ * @param y coordinate of center
+ * @param r radius of circle
+ */
+ public PGcircle(double x, double y, double r) {
+ this(new PGpoint(x, y), r);
}
- try {
- center = new PGpoint(t.getToken(0));
- radius = Double.parseDouble(t.getToken(1));
- } catch (NumberFormatException e) {
- throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
- PSQLState.DATA_TYPE_MISMATCH, e);
+ /**
+ * @param c PGpoint describing the circle's center
+ * @param r radius of circle
+ */
+ public PGcircle(PGpoint c, double r) {
+ this();
+ this.center = c;
+ this.radius = r;
}
- }
- /**
- * @param obj Object to compare with
- * @return true if the two circles are identical
- */
- @Override
- public boolean equals(Object obj) {
- if (obj instanceof PGcircle) {
- PGcircle p = (PGcircle) obj;
- PGpoint center = this.center;
- PGpoint pCenter = p.center;
- if (center == null) {
- return pCenter == null;
- } else if (pCenter == null) {
+ /**
+ * @param s definition of the circle in PostgreSQL's syntax.
+ * @throws SQLException on conversion failure
+ */
+ public PGcircle(String s) throws SQLException {
+ this();
+ setValue(s);
+ }
+
+ /**
+ * This constructor is used by the driver.
+ */
+ public PGcircle() {
+ type = "circle";
+ }
+
+ /**
+ * @param obj Object to compare with
+ * @return true if the two circles are identical
+ */
+ @Override
+ public boolean equals(Object obj) {
+ if (obj instanceof PGcircle) {
+ PGcircle p = (PGcircle) obj;
+ PGpoint center = this.center;
+ PGpoint pCenter = p.center;
+ if (center == null) {
+ return pCenter == null;
+ } else if (pCenter == null) {
+ return false;
+ }
+
+ return p.radius == radius && equals(pCenter, center);
+ }
return false;
- }
-
- return p.radius == radius && equals(pCenter, center);
}
- return false;
- }
- @Override
- public int hashCode() {
- if (center == null) {
- return 0;
+ @Override
+ public int hashCode() {
+ if (center == null) {
+ return 0;
+ }
+ long bits = Double.doubleToLongBits(radius);
+ int v = (int) (bits ^ (bits >>> 32));
+ v = v * 31 + center.hashCode();
+ return v;
}
- long bits = Double.doubleToLongBits(radius);
- int v = (int) (bits ^ (bits >>> 32));
- v = v * 31 + center.hashCode();
- return v;
- }
- @Override
- public Object clone() throws CloneNotSupportedException {
- PGcircle newPGcircle = (PGcircle) super.clone();
- if (newPGcircle.center != null) {
- newPGcircle.center = (PGpoint) newPGcircle.center.clone();
+ @Override
+ public Object clone() throws CloneNotSupportedException {
+ PGcircle newPGcircle = (PGcircle) super.clone();
+ if (newPGcircle.center != null) {
+ newPGcircle.center = (PGpoint) newPGcircle.center.clone();
+ }
+ return newPGcircle;
}
- return newPGcircle;
- }
- /**
- * @return the PGcircle in the syntax expected by org.postgresql
- */
- @Override
- public String getValue() {
- return center == null ? null : "<" + center + "," + radius + ">";
- }
+ /**
+ * @return the PGcircle in the syntax expected by org.postgresql
+ */
+ @Override
+ public String getValue() {
+ return center == null ? null : "<" + center + "," + radius + ">";
+ }
+
+ /**
+ * @param s definition of the circle in PostgreSQL's syntax.
+ * @throws SQLException on conversion failure
+ */
+ @Override
+ public void setValue(String s) throws SQLException {
+ if (s == null) {
+ center = null;
+ return;
+ }
+ PGtokenizer t = new PGtokenizer(PGtokenizer.removeAngle(s), ',');
+ if (t.getSize() != 2) {
+ throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
+ PSQLState.DATA_TYPE_MISMATCH);
+ }
+
+ try {
+ center = new PGpoint(t.getToken(0));
+ radius = Double.parseDouble(t.getToken(1));
+ } catch (NumberFormatException e) {
+ throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
+ PSQLState.DATA_TYPE_MISMATCH, e);
+ }
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/geometric/PGline.java b/pgjdbc/src/main/java/org/postgresql/geometric/PGline.java
index 9ee8ffb..57ffb0b 100644
--- a/pgjdbc/src/main/java/org/postgresql/geometric/PGline.java
+++ b/pgjdbc/src/main/java/org/postgresql/geometric/PGline.java
@@ -20,195 +20,195 @@ import java.sql.SQLException;
@SuppressWarnings("serial")
public class PGline extends PGobject implements Serializable, Cloneable {
- /**
- * Coefficient of x.
- */
- public double a;
+ /**
+ * Coefficient of x.
+ */
+ public double a;
- /**
- * Coefficient of y.
- */
- public double b;
+ /**
+ * Coefficient of y.
+ */
+ public double b;
- /**
- * Constant.
- */
- public double c;
+ /**
+ * Constant.
+ */
+ public double c;
- private boolean isNull;
+ private boolean isNull;
- /**
- * @param a coefficient of x
- * @param b coefficient of y
- * @param c constant
- */
- public PGline(double a, double b, double c) {
- this();
- this.a = a;
- this.b = b;
- this.c = c;
- }
-
- /**
- * @param x1 coordinate for first point on the line
- * @param y1 coordinate for first point on the line
- * @param x2 coordinate for second point on the line
- * @param y2 coordinate for second point on the line
- */
- public PGline(double x1, double y1, double x2, double y2) {
- this();
- setValue(x1, y1, x2, y2);
- }
-
- /**
- * @param p1 first point on the line
- * @param p2 second point on the line
- */
- public PGline(PGpoint p1, PGpoint p2) {
- this();
- setValue(p1, p2);
- }
-
- /**
- * @param lseg Line segment which calls on this line.
- */
- public PGline(PGlseg lseg) {
- this();
- if (lseg == null) {
- isNull = true;
- return;
- }
- PGpoint[] point = lseg.point;
- if (point == null) {
- isNull = true;
- return;
- }
- setValue(point[0], point[1]);
- }
-
- private void setValue(PGpoint p1, PGpoint p2) {
- if (p1 == null || p2 == null) {
- isNull = true;
- } else {
- setValue(p1.x, p1.y, p2.x, p2.y);
- }
- }
-
- private void setValue(double x1, double y1, double x2, double y2) {
- if (x1 == x2) {
- a = -1;
- b = 0;
- } else {
- a = (y2 - y1) / (x2 - x1);
- b = -1;
- }
- c = y1 - a * x1;
- }
-
- /**
- * @param s definition of the line in PostgreSQL's syntax.
- * @throws SQLException on conversion failure
- */
- public PGline(String s) throws SQLException {
- this();
- setValue(s);
- }
-
- /**
- * required by the driver.
- */
- public PGline() {
- type = "line";
- }
-
- /**
- * @param s Definition of the line in PostgreSQL's syntax
- * @throws SQLException on conversion failure
- */
- @Override
- public void setValue(String s) throws SQLException {
- isNull = s == null;
- if (s == null) {
- return;
- }
- if (s.trim().startsWith("{")) {
- PGtokenizer t = new PGtokenizer(PGtokenizer.removeCurlyBrace(s), ',');
- if (t.getSize() != 3) {
- throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
- PSQLState.DATA_TYPE_MISMATCH);
- }
- a = Double.parseDouble(t.getToken(0));
- b = Double.parseDouble(t.getToken(1));
- c = Double.parseDouble(t.getToken(2));
- } else if (s.trim().startsWith("[")) {
- PGtokenizer t = new PGtokenizer(PGtokenizer.removeBox(s), ',');
- if (t.getSize() != 2) {
- throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
- PSQLState.DATA_TYPE_MISMATCH);
- }
- PGpoint point1 = new PGpoint(t.getToken(0));
- PGpoint point2 = new PGpoint(t.getToken(1));
- a = point2.x - point1.x;
- b = point2.y - point1.y;
- c = point1.y;
- }
- }
-
- /**
- * @param obj Object to compare with
- * @return true if the two lines are identical
- */
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj == null || getClass() != obj.getClass()) {
- return false;
- }
- if (!super.equals(obj)) {
- return false;
+ /**
+ * @param a coefficient of x
+ * @param b coefficient of y
+ * @param c constant
+ */
+ public PGline(double a, double b, double c) {
+ this();
+ this.a = a;
+ this.b = b;
+ this.c = c;
}
- PGline pGline = (PGline) obj;
- if (isNull) {
- return pGline.isNull;
- } else if (pGline.isNull) {
- return false;
+ /**
+ * @param x1 coordinate for first point on the line
+ * @param y1 coordinate for first point on the line
+ * @param x2 coordinate for second point on the line
+ * @param y2 coordinate for second point on the line
+ */
+ public PGline(double x1, double y1, double x2, double y2) {
+ this();
+ setValue(x1, y1, x2, y2);
}
- return Double.compare(pGline.a, a) == 0
- && Double.compare(pGline.b, b) == 0
- && Double.compare(pGline.c, c) == 0;
- }
-
- @Override
- public int hashCode() {
- if (isNull) {
- return 0;
+ /**
+ * @param p1 first point on the line
+ * @param p2 second point on the line
+ */
+ public PGline(PGpoint p1, PGpoint p2) {
+ this();
+ setValue(p1, p2);
}
- int result = super.hashCode();
- long temp;
- temp = Double.doubleToLongBits(a);
- result = 31 * result + (int) (temp ^ (temp >>> 32));
- temp = Double.doubleToLongBits(b);
- result = 31 * result + (int) (temp ^ (temp >>> 32));
- temp = Double.doubleToLongBits(c);
- result = 31 * result + (int) (temp ^ (temp >>> 32));
- return result;
- }
- /**
- * @return the PGline in the syntax expected by org.postgresql
- */
- @Override
- public String getValue() {
- return isNull ? null : "{" + a + "," + b + "," + c + "}";
- }
+ /**
+ * @param lseg Line segment which calls on this line.
+ */
+ public PGline(PGlseg lseg) {
+ this();
+ if (lseg == null) {
+ isNull = true;
+ return;
+ }
+ PGpoint[] point = lseg.point;
+ if (point == null) {
+ isNull = true;
+ return;
+ }
+ setValue(point[0], point[1]);
+ }
- @Override
- public Object clone() throws CloneNotSupportedException {
- // squid:S2157 "Cloneables" should implement "clone
- return super.clone();
- }
+ /**
+ * @param s definition of the line in PostgreSQL's syntax.
+ * @throws SQLException on conversion failure
+ */
+ public PGline(String s) throws SQLException {
+ this();
+ setValue(s);
+ }
+
+ /**
+ * required by the driver.
+ */
+ public PGline() {
+ type = "line";
+ }
+
+ private void setValue(PGpoint p1, PGpoint p2) {
+ if (p1 == null || p2 == null) {
+ isNull = true;
+ } else {
+ setValue(p1.x, p1.y, p2.x, p2.y);
+ }
+ }
+
+ private void setValue(double x1, double y1, double x2, double y2) {
+ if (x1 == x2) {
+ a = -1;
+ b = 0;
+ } else {
+ a = (y2 - y1) / (x2 - x1);
+ b = -1;
+ }
+ c = y1 - a * x1;
+ }
+
+ /**
+ * @param obj Object to compare with
+ * @return true if the two lines are identical
+ */
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null || getClass() != obj.getClass()) {
+ return false;
+ }
+ if (!super.equals(obj)) {
+ return false;
+ }
+
+ PGline pGline = (PGline) obj;
+ if (isNull) {
+ return pGline.isNull;
+ } else if (pGline.isNull) {
+ return false;
+ }
+
+ return Double.compare(pGline.a, a) == 0
+ && Double.compare(pGline.b, b) == 0
+ && Double.compare(pGline.c, c) == 0;
+ }
+
+ @Override
+ public int hashCode() {
+ if (isNull) {
+ return 0;
+ }
+ int result = super.hashCode();
+ long temp;
+ temp = Double.doubleToLongBits(a);
+ result = 31 * result + (int) (temp ^ (temp >>> 32));
+ temp = Double.doubleToLongBits(b);
+ result = 31 * result + (int) (temp ^ (temp >>> 32));
+ temp = Double.doubleToLongBits(c);
+ result = 31 * result + (int) (temp ^ (temp >>> 32));
+ return result;
+ }
+
+ /**
+ * @return the PGline in the syntax expected by org.postgresql
+ */
+ @Override
+ public String getValue() {
+ return isNull ? null : "{" + a + "," + b + "," + c + "}";
+ }
+
+ /**
+ * @param s Definition of the line in PostgreSQL's syntax
+ * @throws SQLException on conversion failure
+ */
+ @Override
+ public void setValue(String s) throws SQLException {
+ isNull = s == null;
+ if (s == null) {
+ return;
+ }
+ if (s.trim().startsWith("{")) {
+ PGtokenizer t = new PGtokenizer(PGtokenizer.removeCurlyBrace(s), ',');
+ if (t.getSize() != 3) {
+ throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
+ PSQLState.DATA_TYPE_MISMATCH);
+ }
+ a = Double.parseDouble(t.getToken(0));
+ b = Double.parseDouble(t.getToken(1));
+ c = Double.parseDouble(t.getToken(2));
+ } else if (s.trim().startsWith("[")) {
+ PGtokenizer t = new PGtokenizer(PGtokenizer.removeBox(s), ',');
+ if (t.getSize() != 2) {
+ throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
+ PSQLState.DATA_TYPE_MISMATCH);
+ }
+ PGpoint point1 = new PGpoint(t.getToken(0));
+ PGpoint point2 = new PGpoint(t.getToken(1));
+ a = point2.x - point1.x;
+ b = point2.y - point1.y;
+ c = point1.y;
+ }
+ }
+
+ @Override
+ public Object clone() throws CloneNotSupportedException {
+    // squid:S2157 "Cloneables" should implement "clone"
+ return super.clone();
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/geometric/PGlseg.java b/pgjdbc/src/main/java/org/postgresql/geometric/PGlseg.java
index da1c158..8c6c0f0 100644
--- a/pgjdbc/src/main/java/org/postgresql/geometric/PGlseg.java
+++ b/pgjdbc/src/main/java/org/postgresql/geometric/PGlseg.java
@@ -19,123 +19,123 @@ import java.sql.SQLException;
*/
@SuppressWarnings("serial")
public class PGlseg extends PGobject implements Serializable, Cloneable {
- /**
- * These are the two points.
- */
- public PGpoint [] point;
+ /**
+ * These are the two points.
+ */
+ public PGpoint[] point;
- /**
- * @param x1 coordinate for first point
- * @param y1 coordinate for first point
- * @param x2 coordinate for second point
- * @param y2 coordinate for second point
- */
- public PGlseg(double x1, double y1, double x2, double y2) {
- this(new PGpoint(x1, y1), new PGpoint(x2, y2));
- }
-
- /**
- * @param p1 first point
- * @param p2 second point
- */
- public PGlseg(PGpoint p1, PGpoint p2) {
- this();
- point = new PGpoint[]{p1, p2};
- }
-
- /**
- * @param s definition of the line segment in PostgreSQL's syntax.
- * @throws SQLException on conversion failure
- */
- public PGlseg(String s) throws SQLException {
- this();
- setValue(s);
- }
-
- /**
- * required by the driver.
- */
- public PGlseg() {
- type = "lseg";
- }
-
- /**
- * @param s Definition of the line segment in PostgreSQL's syntax
- * @throws SQLException on conversion failure
- */
- @Override
- public void setValue(String s) throws SQLException {
- if (s == null) {
- point = null;
- return;
- }
- PGtokenizer t = new PGtokenizer(PGtokenizer.removeBox(s), ',');
- if (t.getSize() != 2) {
- throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
- PSQLState.DATA_TYPE_MISMATCH);
+ /**
+ * @param x1 coordinate for first point
+ * @param y1 coordinate for first point
+ * @param x2 coordinate for second point
+ * @param y2 coordinate for second point
+ */
+ public PGlseg(double x1, double y1, double x2, double y2) {
+ this(new PGpoint(x1, y1), new PGpoint(x2, y2));
}
- PGpoint[] point = this.point;
- if (point == null) {
- this.point = point = new PGpoint[2];
+ /**
+ * @param p1 first point
+ * @param p2 second point
+ */
+ public PGlseg(PGpoint p1, PGpoint p2) {
+ this();
+ point = new PGpoint[]{p1, p2};
}
- point[0] = new PGpoint(t.getToken(0));
- point[1] = new PGpoint(t.getToken(1));
- }
- /**
- * @param obj Object to compare with
- * @return true if the two line segments are identical
- */
- @Override
- public boolean equals(Object obj) {
- if (obj instanceof PGlseg) {
- PGlseg p = (PGlseg) obj;
- PGpoint[] point = this.point;
- PGpoint[] pPoint = p.point;
- if (point == null) {
- return pPoint == null;
- } else if (pPoint == null) {
- return false;
- }
- return (pPoint[0].equals(point[0]) && pPoint[1].equals(point[1]))
- || (pPoint[0].equals(point[1]) && pPoint[1].equals(point[0]));
+ /**
+ * @param s definition of the line segment in PostgreSQL's syntax.
+ * @throws SQLException on conversion failure
+ */
+ public PGlseg(String s) throws SQLException {
+ this();
+ setValue(s);
}
- return false;
- }
- @Override
- public int hashCode() {
- PGpoint[] point = this.point;
- if (point == null) {
- return 0;
+ /**
+ * required by the driver.
+ */
+ public PGlseg() {
+ type = "lseg";
}
- return point[0].hashCode() ^ point[1].hashCode();
- }
- @Override
- public Object clone() throws CloneNotSupportedException {
- PGlseg newPGlseg = (PGlseg) super.clone();
- if (newPGlseg.point != null) {
- newPGlseg.point = newPGlseg.point.clone();
- for (int i = 0; i < newPGlseg.point.length; i++) {
- if (newPGlseg.point[i] != null) {
- newPGlseg.point[i] = (PGpoint) newPGlseg.point[i].clone();
+ /**
+ * @param obj Object to compare with
+ * @return true if the two line segments are identical
+ */
+ @Override
+ public boolean equals(Object obj) {
+ if (obj instanceof PGlseg) {
+ PGlseg p = (PGlseg) obj;
+ PGpoint[] point = this.point;
+ PGpoint[] pPoint = p.point;
+ if (point == null) {
+ return pPoint == null;
+ } else if (pPoint == null) {
+ return false;
+ }
+ return (pPoint[0].equals(point[0]) && pPoint[1].equals(point[1]))
+ || (pPoint[0].equals(point[1]) && pPoint[1].equals(point[0]));
}
- }
+ return false;
}
- return newPGlseg;
- }
- /**
- * @return the PGlseg in the syntax expected by org.postgresql
- */
- @Override
- public String getValue() {
- PGpoint[] point = this.point;
- if (point == null) {
- return null;
+ @Override
+ public int hashCode() {
+ PGpoint[] point = this.point;
+ if (point == null) {
+ return 0;
+ }
+ return point[0].hashCode() ^ point[1].hashCode();
+ }
+
+ @Override
+ public Object clone() throws CloneNotSupportedException {
+ PGlseg newPGlseg = (PGlseg) super.clone();
+ if (newPGlseg.point != null) {
+ newPGlseg.point = newPGlseg.point.clone();
+ for (int i = 0; i < newPGlseg.point.length; i++) {
+ if (newPGlseg.point[i] != null) {
+ newPGlseg.point[i] = (PGpoint) newPGlseg.point[i].clone();
+ }
+ }
+ }
+ return newPGlseg;
+ }
+
+ /**
+ * @return the PGlseg in the syntax expected by org.postgresql
+ */
+ @Override
+ public String getValue() {
+ PGpoint[] point = this.point;
+ if (point == null) {
+ return null;
+ }
+ return "[" + point[0] + "," + point[1] + "]";
+ }
+
+ /**
+ * @param s Definition of the line segment in PostgreSQL's syntax
+ * @throws SQLException on conversion failure
+ */
+ @Override
+ public void setValue(String s) throws SQLException {
+ if (s == null) {
+ point = null;
+ return;
+ }
+ PGtokenizer t = new PGtokenizer(PGtokenizer.removeBox(s), ',');
+ if (t.getSize() != 2) {
+ throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
+ PSQLState.DATA_TYPE_MISMATCH);
+ }
+
+ PGpoint[] point = this.point;
+ if (point == null) {
+ this.point = point = new PGpoint[2];
+ }
+ point[0] = new PGpoint(t.getToken(0));
+ point[1] = new PGpoint(t.getToken(1));
}
- return "[" + point[0] + "," + point[1] + "]";
- }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/geometric/PGpath.java b/pgjdbc/src/main/java/org/postgresql/geometric/PGpath.java
index 807ee86..c8a54d1 100644
--- a/pgjdbc/src/main/java/org/postgresql/geometric/PGpath.java
+++ b/pgjdbc/src/main/java/org/postgresql/geometric/PGpath.java
@@ -19,172 +19,173 @@ import java.sql.SQLException;
*/
@SuppressWarnings("serial")
public class PGpath extends PGobject implements Serializable, Cloneable {
- /**
- * True if the path is open, false if closed.
- */
- public boolean open;
+ /**
+ * True if the path is open, false if closed.
+ */
+ public boolean open;
- /**
- * The points defining this path.
- */
- public PGpoint [] points;
+ /**
+ * The points defining this path.
+ */
+ public PGpoint[] points;
- /**
- * @param points the PGpoints that define the path
- * @param open True if the path is open, false if closed
- */
- public PGpath(PGpoint [] points, boolean open) {
- this();
- this.points = points;
- this.open = open;
- }
-
- /**
- * Required by the driver.
- */
- public PGpath() {
- type = "path";
- }
-
- /**
- * @param s definition of the path in PostgreSQL's syntax.
- * @throws SQLException on conversion failure
- */
- public PGpath(String s) throws SQLException {
- this();
- setValue(s);
- }
-
- /**
- * @param s Definition of the path in PostgreSQL's syntax
- * @throws SQLException on conversion failure
- */
- @Override
- public void setValue(String s) throws SQLException {
- if (s == null) {
- points = null;
- return;
- }
- // First test to see if were open
- if (s.startsWith("[") && s.endsWith("]")) {
- open = true;
- s = PGtokenizer.removeBox(s);
- } else if (s.startsWith("(") && s.endsWith(")")) {
- open = false;
- s = PGtokenizer.removePara(s);
- } else {
- throw new PSQLException(GT.tr("Cannot tell if path is open or closed: {0}.", s),
- PSQLState.DATA_TYPE_MISMATCH);
+ /**
+ * @param points the PGpoints that define the path
+ * @param open True if the path is open, false if closed
+ */
+ public PGpath(PGpoint[] points, boolean open) {
+ this();
+ this.points = points;
+ this.open = open;
}
- PGtokenizer t = new PGtokenizer(s, ',');
- int npoints = t.getSize();
- PGpoint[] points = new PGpoint[npoints];
- this.points = points;
- for (int p = 0; p < npoints; p++) {
- points[p] = new PGpoint(t.getToken(p));
+ /**
+ * Required by the driver.
+ */
+ public PGpath() {
+ type = "path";
}
- }
- /**
- * @param obj Object to compare with
- * @return true if the two paths are identical
- */
- @Override
- public boolean equals(Object obj) {
- if (obj instanceof PGpath) {
- PGpath p = (PGpath) obj;
+ /**
+ * @param s definition of the path in PostgreSQL's syntax.
+ * @throws SQLException on conversion failure
+ */
+ public PGpath(String s) throws SQLException {
+ this();
+ setValue(s);
+ }
- PGpoint[] points = this.points;
- PGpoint[] pPoints = p.points;
- if (points == null) {
- return pPoints == null;
- } else if (pPoints == null) {
- return false;
- }
+ /**
+ * @param obj Object to compare with
+ * @return true if the two paths are identical
+ */
+ @Override
+ public boolean equals(Object obj) {
+ if (obj instanceof PGpath) {
+ PGpath p = (PGpath) obj;
- if (p.open != open) {
- return false;
- }
+ PGpoint[] points = this.points;
+ PGpoint[] pPoints = p.points;
+ if (points == null) {
+ return pPoints == null;
+ } else if (pPoints == null) {
+ return false;
+ }
- if (pPoints.length != points.length) {
- return false;
- }
+ if (p.open != open) {
+ return false;
+ }
- for (int i = 0; i < points.length; i++) {
- if (!points[i].equals(pPoints[i])) {
- return false;
+ if (pPoints.length != points.length) {
+ return false;
+ }
+
+ for (int i = 0; i < points.length; i++) {
+ if (!points[i].equals(pPoints[i])) {
+ return false;
+ }
+ }
+
+ return true;
}
- }
-
- return true;
+ return false;
}
- return false;
- }
- @Override
- public int hashCode() {
- PGpoint[] points = this.points;
- if (points == null) {
- return 0;
+ @Override
+ public int hashCode() {
+ PGpoint[] points = this.points;
+ if (points == null) {
+ return 0;
+ }
+ // XXX not very good..
+ int hash = open ? 1231 : 1237;
+ for (int i = 0; i < points.length && i < 5; i++) {
+ hash = hash * 31 + points[i].hashCode();
+ }
+ return hash;
}
- // XXX not very good..
- int hash = open ? 1231 : 1237;
- for (int i = 0; i < points.length && i < 5; i++) {
- hash = hash * 31 + points[i].hashCode();
+
+ @Override
+ public Object clone() throws CloneNotSupportedException {
+ PGpath newPGpath = (PGpath) super.clone();
+ if (newPGpath.points != null) {
+ PGpoint[] newPoints = newPGpath.points.clone();
+ newPGpath.points = newPoints;
+ for (int i = 0; i < newPGpath.points.length; i++) {
+ newPoints[i] = (PGpoint) newPGpath.points[i].clone();
+ }
+ }
+ return newPGpath;
}
- return hash;
- }
- @Override
- public Object clone() throws CloneNotSupportedException {
- PGpath newPGpath = (PGpath) super.clone();
- if (newPGpath.points != null) {
- PGpoint[] newPoints = newPGpath.points.clone();
- newPGpath.points = newPoints;
- for (int i = 0; i < newPGpath.points.length; i++) {
- newPoints[i] = (PGpoint) newPGpath.points[i].clone();
- }
+ /**
+ * This returns the path in the syntax expected by org.postgresql.
+ *
+ * @return the value of this object
+ */
+ @Override
+ public String getValue() {
+ PGpoint[] points = this.points;
+ if (points == null) {
+ return null;
+ }
+ StringBuilder b = new StringBuilder(open ? "[" : "(");
+
+ for (int p = 0; p < points.length; p++) {
+ if (p > 0) {
+ b.append(",");
+ }
+ b.append(points[p].toString());
+ }
+ b.append(open ? "]" : ")");
+
+ return b.toString();
}
- return newPGpath;
- }
- /**
- * This returns the path in the syntax expected by org.postgresql.
- * @return the value of this object
- */
- @Override
- public String getValue() {
- PGpoint[] points = this.points;
- if (points == null) {
- return null;
+ /**
+ * @param s Definition of the path in PostgreSQL's syntax
+ * @throws SQLException on conversion failure
+ */
+ @Override
+ public void setValue(String s) throws SQLException {
+ if (s == null) {
+ points = null;
+ return;
+ }
+ // First, determine whether the path is open or closed
+ if (s.startsWith("[") && s.endsWith("]")) {
+ open = true;
+ s = PGtokenizer.removeBox(s);
+ } else if (s.startsWith("(") && s.endsWith(")")) {
+ open = false;
+ s = PGtokenizer.removePara(s);
+ } else {
+ throw new PSQLException(GT.tr("Cannot tell if path is open or closed: {0}.", s),
+ PSQLState.DATA_TYPE_MISMATCH);
+ }
+
+ PGtokenizer t = new PGtokenizer(s, ',');
+ int npoints = t.getSize();
+ PGpoint[] points = new PGpoint[npoints];
+ this.points = points;
+ for (int p = 0; p < npoints; p++) {
+ points[p] = new PGpoint(t.getToken(p));
+ }
}
- StringBuilder b = new StringBuilder(open ? "[" : "(");
- for (int p = 0; p < points.length; p++) {
- if (p > 0) {
- b.append(",");
- }
- b.append(points[p].toString());
+ public boolean isOpen() {
+ return open && points != null;
}
- b.append(open ? "]" : ")");
- return b.toString();
- }
+ public boolean isClosed() {
+ return !open && points != null;
+ }
- public boolean isOpen() {
- return open && points != null;
- }
+ public void closePath() {
+ open = false;
+ }
- public boolean isClosed() {
- return !open && points != null;
- }
-
- public void closePath() {
- open = false;
- }
-
- public void openPath() {
- open = true;
- }
+ public void openPath() {
+ open = true;
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/geometric/PGpoint.java b/pgjdbc/src/main/java/org/postgresql/geometric/PGpoint.java
index 7744a30..e7138cf 100644
--- a/pgjdbc/src/main/java/org/postgresql/geometric/PGpoint.java
+++ b/pgjdbc/src/main/java/org/postgresql/geometric/PGpoint.java
@@ -23,190 +23,190 @@ import java.sql.SQLException;
*/
@SuppressWarnings("serial")
public class PGpoint extends PGobject implements PGBinaryObject, Serializable, Cloneable {
- /**
- * The X coordinate of the point.
- */
- public double x;
+ /**
+ * The X coordinate of the point.
+ */
+ public double x;
- /**
- * The Y coordinate of the point.
- */
- public double y;
+ /**
+ * The Y coordinate of the point.
+ */
+ public double y;
- /**
- * True if the point represents {@code null::point}.
- */
- public boolean isNull;
+ /**
+ * True if the point represents {@code null::point}.
+ */
+ public boolean isNull;
- /**
- * @param x coordinate
- * @param y coordinate
- */
- public PGpoint(double x, double y) {
- this();
- this.x = x;
- this.y = y;
- }
-
- /**
- * This is called mainly from the other geometric types, when a point is embedded within their
- * definition.
- *
- * @param value Definition of this point in PostgreSQL's syntax
- * @throws SQLException if something goes wrong
- */
- public PGpoint(String value) throws SQLException {
- this();
- setValue(value);
- }
-
- /**
- * Required by the driver.
- */
- public PGpoint() {
- type = "point";
- }
-
- /**
- * @param s Definition of this point in PostgreSQL's syntax
- * @throws SQLException on conversion failure
- */
- @Override
- public void setValue(String s) throws SQLException {
- isNull = s == null;
- if (s == null) {
- return;
+ /**
+ * @param x coordinate
+ * @param y coordinate
+ */
+ public PGpoint(double x, double y) {
+ this();
+ this.x = x;
+ this.y = y;
}
- PGtokenizer t = new PGtokenizer(PGtokenizer.removePara(s), ',');
- try {
- x = Double.parseDouble(t.getToken(0));
- y = Double.parseDouble(t.getToken(1));
- } catch (NumberFormatException e) {
- throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
- PSQLState.DATA_TYPE_MISMATCH, e);
+
+ /**
+ * This is called mainly from the other geometric types, when a point is embedded within their
+ * definition.
+ *
+ * @param value Definition of this point in PostgreSQL's syntax
+ * @throws SQLException if something goes wrong
+ */
+ public PGpoint(String value) throws SQLException {
+ this();
+ setValue(value);
}
- }
- /**
- * @param b Definition of this point in PostgreSQL's binary syntax
- */
- @Override
- public void setByteValue(byte[] b, int offset) {
- this.isNull = false;
- x = ByteConverter.float8(b, offset);
- y = ByteConverter.float8(b, offset + 8);
- }
+ /**
+ * Required by the driver.
+ */
+ public PGpoint() {
+ type = "point";
+ }
- /**
- * @param obj Object to compare with
- * @return true if the two points are identical
- */
- @Override
- public boolean equals(Object obj) {
- if (obj instanceof PGpoint) {
- PGpoint p = (PGpoint) obj;
- if (isNull) {
- return p.isNull;
- } else if (p.isNull) {
+ /**
+ * @param b Definition of this point in PostgreSQL's binary syntax
+ */
+ @Override
+ public void setByteValue(byte[] b, int offset) {
+ this.isNull = false;
+ x = ByteConverter.float8(b, offset);
+ y = ByteConverter.float8(b, offset + 8);
+ }
+
+ /**
+ * @param obj Object to compare with
+ * @return true if the two points are identical
+ */
+ @Override
+ public boolean equals(Object obj) {
+ if (obj instanceof PGpoint) {
+ PGpoint p = (PGpoint) obj;
+ if (isNull) {
+ return p.isNull;
+ } else if (p.isNull) {
+ return false;
+ }
+ return x == p.x && y == p.y;
+ }
return false;
- }
- return x == p.x && y == p.y;
}
- return false;
- }
- @Override
- public int hashCode() {
- if (isNull) {
- return 0;
+ @Override
+ public int hashCode() {
+ if (isNull) {
+ return 0;
+ }
+ long v1 = Double.doubleToLongBits(x);
+ long v2 = Double.doubleToLongBits(y);
+ return (int) (v1 ^ v2 ^ (v1 >>> 32) ^ (v2 >>> 32));
}
- long v1 = Double.doubleToLongBits(x);
- long v2 = Double.doubleToLongBits(y);
- return (int) (v1 ^ v2 ^ (v1 >>> 32) ^ (v2 >>> 32));
- }
- /**
- * @return the PGpoint in the syntax expected by org.postgresql
- */
- @Override
- public String getValue() {
- return isNull ? null : "(" + x + "," + y + ")";
- }
-
- @Override
- public int lengthInBytes() {
- return isNull ? 0 : 16;
- }
-
- /**
- * Populate the byte array with PGpoint in the binary syntax expected by org.postgresql.
- */
- @Override
- public void toBytes(byte[] b, int offset) {
- if (isNull) {
- return;
+ /**
+ * @return the PGpoint in the syntax expected by org.postgresql
+ */
+ @Override
+ public String getValue() {
+ return isNull ? null : "(" + x + "," + y + ")";
}
- ByteConverter.float8(b, offset, x);
- ByteConverter.float8(b, offset + 8, y);
- }
- /**
- * Translate the point by the supplied amount.
- *
- * @param x integer amount to add on the x axis
- * @param y integer amount to add on the y axis
- */
- public void translate(int x, int y) {
- translate((double) x, (double) y);
- }
+ /**
+ * @param s Definition of this point in PostgreSQL's syntax
+ * @throws SQLException on conversion failure
+ */
+ @Override
+ public void setValue(String s) throws SQLException {
+ isNull = s == null;
+ if (s == null) {
+ return;
+ }
+ PGtokenizer t = new PGtokenizer(PGtokenizer.removePara(s), ',');
+ try {
+ x = Double.parseDouble(t.getToken(0));
+ y = Double.parseDouble(t.getToken(1));
+ } catch (NumberFormatException e) {
+ throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
+ PSQLState.DATA_TYPE_MISMATCH, e);
+ }
+ }
- /**
- * Translate the point by the supplied amount.
- *
- * @param x double amount to add on the x axis
- * @param y double amount to add on the y axis
- */
- public void translate(double x, double y) {
- this.isNull = false;
- this.x += x;
- this.y += y;
- }
+ @Override
+ public int lengthInBytes() {
+ return isNull ? 0 : 16;
+ }
- /**
- * Moves the point to the supplied coordinates.
- *
- * @param x integer coordinate
- * @param y integer coordinate
- */
- public void move(int x, int y) {
- setLocation(x, y);
- }
+ /**
+ * Populate the byte array with PGpoint in the binary syntax expected by org.postgresql.
+ */
+ @Override
+ public void toBytes(byte[] b, int offset) {
+ if (isNull) {
+ return;
+ }
+ ByteConverter.float8(b, offset, x);
+ ByteConverter.float8(b, offset + 8, y);
+ }
- /**
- * Moves the point to the supplied coordinates.
- *
- * @param x double coordinate
- * @param y double coordinate
- */
- public void move(double x, double y) {
- this.isNull = false;
- this.x = x;
- this.y = y;
- }
+ /**
+ * Translate the point by the supplied amount.
+ *
+ * @param x integer amount to add on the x axis
+ * @param y integer amount to add on the y axis
+ */
+ public void translate(int x, int y) {
+ translate((double) x, (double) y);
+ }
- /**
- * Moves the point to the supplied coordinates. refer to java.awt.Point for description of this.
- *
- * @param x integer coordinate
- * @param y integer coordinate
- */
- public void setLocation(int x, int y) {
- move((double) x, (double) y);
- }
+ /**
+ * Translate the point by the supplied amount.
+ *
+ * @param x double amount to add on the x axis
+ * @param y double amount to add on the y axis
+ */
+ public void translate(double x, double y) {
+ this.isNull = false;
+ this.x += x;
+ this.y += y;
+ }
- @Override
- public Object clone() throws CloneNotSupportedException {
- // squid:S2157 "Cloneables" should implement "clone
- return super.clone();
- }
+ /**
+ * Moves the point to the supplied coordinates.
+ *
+ * @param x integer coordinate
+ * @param y integer coordinate
+ */
+ public void move(int x, int y) {
+ setLocation(x, y);
+ }
+
+ /**
+ * Moves the point to the supplied coordinates.
+ *
+ * @param x double coordinate
+ * @param y double coordinate
+ */
+ public void move(double x, double y) {
+ this.isNull = false;
+ this.x = x;
+ this.y = y;
+ }
+
+ /**
+ * Moves the point to the supplied coordinates. Refer to java.awt.Point for a description of this method.
+ *
+ * @param x integer coordinate
+ * @param y integer coordinate
+ */
+ public void setLocation(int x, int y) {
+ move((double) x, (double) y);
+ }
+
+ @Override
+ public Object clone() throws CloneNotSupportedException {
+ // squid:S2157 "Cloneables" should implement "clone()"
+ return super.clone();
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/geometric/PGpolygon.java b/pgjdbc/src/main/java/org/postgresql/geometric/PGpolygon.java
index 3a0d7b9..f283531 100644
--- a/pgjdbc/src/main/java/org/postgresql/geometric/PGpolygon.java
+++ b/pgjdbc/src/main/java/org/postgresql/geometric/PGpolygon.java
@@ -16,136 +16,136 @@ import java.sql.SQLException;
*/
@SuppressWarnings("serial")
public class PGpolygon extends PGobject implements Serializable, Cloneable {
- /**
- * The points defining the polygon.
- */
- public PGpoint [] points;
+ /**
+ * The points defining the polygon.
+ */
+ public PGpoint[] points;
- /**
- * Creates a polygon using an array of PGpoints.
- *
- * @param points the points defining the polygon
- */
- public PGpolygon(PGpoint[] points) {
- this();
- this.points = points;
- }
-
- /**
- * @param s definition of the polygon in PostgreSQL's syntax.
- * @throws SQLException on conversion failure
- */
- public PGpolygon(String s) throws SQLException {
- this();
- setValue(s);
- }
-
- /**
- * Required by the driver.
- */
- public PGpolygon() {
- type = "polygon";
- }
-
- /**
- * @param s Definition of the polygon in PostgreSQL's syntax
- * @throws SQLException on conversion failure
- */
- @Override
- public void setValue(String s) throws SQLException {
- if (s == null) {
- points = null;
- return;
+ /**
+ * Creates a polygon using an array of PGpoints.
+ *
+ * @param points the points defining the polygon
+ */
+ public PGpolygon(PGpoint[] points) {
+ this();
+ this.points = points;
}
- PGtokenizer t = new PGtokenizer(PGtokenizer.removePara(s), ',');
- int npoints = t.getSize();
- PGpoint[] points = this.points;
- if (points == null || points.length != npoints) {
- this.points = points = new PGpoint[npoints];
+
+ /**
+ * @param s definition of the polygon in PostgreSQL's syntax.
+ * @throws SQLException on conversion failure
+ */
+ public PGpolygon(String s) throws SQLException {
+ this();
+ setValue(s);
}
- for (int p = 0; p < npoints; p++) {
- points[p] = new PGpoint(t.getToken(p));
+
+ /**
+ * Required by the driver.
+ */
+ public PGpolygon() {
+ type = "polygon";
}
- }
- /**
- * @param obj Object to compare with
- * @return true if the two polygons are identical
- */
- @Override
- public boolean equals(Object obj) {
- if (obj instanceof PGpolygon) {
- PGpolygon p = (PGpolygon) obj;
+ /**
+ * @param obj Object to compare with
+ * @return true if the two polygons are identical
+ */
+ @Override
+ public boolean equals(Object obj) {
+ if (obj instanceof PGpolygon) {
+ PGpolygon p = (PGpolygon) obj;
- PGpoint[] points = this.points;
- PGpoint[] pPoints = p.points;
- if (points == null) {
- return pPoints == null;
- } else if (pPoints == null) {
- return false;
- }
+ PGpoint[] points = this.points;
+ PGpoint[] pPoints = p.points;
+ if (points == null) {
+ return pPoints == null;
+ } else if (pPoints == null) {
+ return false;
+ }
- if (pPoints.length != points.length) {
- return false;
- }
+ if (pPoints.length != points.length) {
+ return false;
+ }
- for (int i = 0; i < points.length; i++) {
- if (!points[i].equals(pPoints[i])) {
- return false;
+ for (int i = 0; i < points.length; i++) {
+ if (!points[i].equals(pPoints[i])) {
+ return false;
+ }
+ }
+
+ return true;
}
- }
-
- return true;
+ return false;
}
- return false;
- }
- @Override
- public int hashCode() {
- int hash = 0;
- PGpoint[] points = this.points;
- if (points == null) {
- return hash;
- }
- for (int i = 0; i < points.length && i < 5; i++) {
- hash = hash * 31 + points[i].hashCode();
- }
- return hash;
- }
-
- @Override
- public Object clone() throws CloneNotSupportedException {
- PGpolygon newPGpolygon = (PGpolygon) super.clone();
- if (newPGpolygon.points != null) {
- PGpoint[] newPoints = newPGpolygon.points.clone();
- newPGpolygon.points = newPoints;
- for (int i = 0; i < newPGpolygon.points.length; i++) {
- if (newPGpolygon.points[i] != null) {
- newPoints[i] = (PGpoint) newPGpolygon.points[i].clone();
+ @Override
+ public int hashCode() {
+ int hash = 0;
+ PGpoint[] points = this.points;
+ if (points == null) {
+ return hash;
}
- }
+ for (int i = 0; i < points.length && i < 5; i++) {
+ hash = hash * 31 + points[i].hashCode();
+ }
+ return hash;
}
- return newPGpolygon;
- }
- /**
- * @return the PGpolygon in the syntax expected by org.postgresql
- */
- @Override
- public String getValue() {
- PGpoint[] points = this.points;
- if (points == null) {
- return null;
+ @Override
+ public Object clone() throws CloneNotSupportedException {
+ PGpolygon newPGpolygon = (PGpolygon) super.clone();
+ if (newPGpolygon.points != null) {
+ PGpoint[] newPoints = newPGpolygon.points.clone();
+ newPGpolygon.points = newPoints;
+ for (int i = 0; i < newPGpolygon.points.length; i++) {
+ if (newPGpolygon.points[i] != null) {
+ newPoints[i] = (PGpoint) newPGpolygon.points[i].clone();
+ }
+ }
+ }
+ return newPGpolygon;
}
- StringBuilder b = new StringBuilder();
- b.append("(");
- for (int p = 0; p < points.length; p++) {
- if (p > 0) {
- b.append(",");
- }
- b.append(points[p].toString());
+
+ /**
+ * @return the PGpolygon in the syntax expected by org.postgresql
+ */
+ @Override
+ public String getValue() {
+ PGpoint[] points = this.points;
+ if (points == null) {
+ return null;
+ }
+ StringBuilder b = new StringBuilder();
+ b.append("(");
+ for (int p = 0; p < points.length; p++) {
+ if (p > 0) {
+ b.append(",");
+ }
+ b.append(points[p].toString());
+ }
+ b.append(")");
+ return b.toString();
+ }
+
+ /**
+ * @param s Definition of the polygon in PostgreSQL's syntax
+ * @throws SQLException on conversion failure
+ */
+ @Override
+ public void setValue(String s) throws SQLException {
+ if (s == null) {
+ points = null;
+ return;
+ }
+ PGtokenizer t = new PGtokenizer(PGtokenizer.removePara(s), ',');
+ int npoints = t.getSize();
+ PGpoint[] points = this.points;
+ if (points == null || points.length != npoints) {
+ this.points = points = new PGpoint[npoints];
+ }
+ for (int p = 0; p < npoints; p++) {
+ points[p] = new PGpoint(t.getToken(p));
+ }
}
- b.append(")");
- return b.toString();
- }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/gss/GSSCallbackHandler.java b/pgjdbc/src/main/java/org/postgresql/gss/GSSCallbackHandler.java
index 9ec36fe..8c09381 100644
--- a/pgjdbc/src/main/java/org/postgresql/gss/GSSCallbackHandler.java
+++ b/pgjdbc/src/main/java/org/postgresql/gss/GSSCallbackHandler.java
@@ -6,7 +6,6 @@
package org.postgresql.gss;
import java.io.IOException;
-
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.callback.NameCallback;
@@ -20,44 +19,44 @@ import javax.security.auth.callback.UnsupportedCallbackException;
class GSSCallbackHandler implements CallbackHandler {
- private final String user;
- private final char [] password;
+ private final String user;
+ private final char[] password;
- GSSCallbackHandler(String user, char [] password) {
- this.user = user;
- this.password = password;
- }
-
- @Override
- public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
- for (Callback callback : callbacks) {
- if (callback instanceof TextOutputCallback) {
- TextOutputCallback toc = (TextOutputCallback) callback;
- switch (toc.getMessageType()) {
- case TextOutputCallback.INFORMATION:
- System.out.println("INFO: " + toc.getMessage());
- break;
- case TextOutputCallback.ERROR:
- System.out.println("ERROR: " + toc.getMessage());
- break;
- case TextOutputCallback.WARNING:
- System.out.println("WARNING: " + toc.getMessage());
- break;
- default:
- throw new IOException("Unsupported message type: " + toc.getMessageType());
- }
- } else if (callback instanceof NameCallback) {
- NameCallback nc = (NameCallback) callback;
- nc.setName(user);
- } else if (callback instanceof PasswordCallback) {
- PasswordCallback pc = (PasswordCallback) callback;
- if (password == null) {
- throw new IOException("No cached kerberos ticket found and no password supplied.");
- }
- pc.setPassword(password);
- } else {
- throw new UnsupportedCallbackException(callback, "Unrecognized Callback");
- }
+ GSSCallbackHandler(String user, char[] password) {
+ this.user = user;
+ this.password = password;
+ }
+
+ @Override
+ public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
+ for (Callback callback : callbacks) {
+ if (callback instanceof TextOutputCallback) {
+ TextOutputCallback toc = (TextOutputCallback) callback;
+ switch (toc.getMessageType()) {
+ case TextOutputCallback.INFORMATION:
+ System.out.println("INFO: " + toc.getMessage());
+ break;
+ case TextOutputCallback.ERROR:
+ System.out.println("ERROR: " + toc.getMessage());
+ break;
+ case TextOutputCallback.WARNING:
+ System.out.println("WARNING: " + toc.getMessage());
+ break;
+ default:
+ throw new IOException("Unsupported message type: " + toc.getMessageType());
+ }
+ } else if (callback instanceof NameCallback) {
+ NameCallback nc = (NameCallback) callback;
+ nc.setName(user);
+ } else if (callback instanceof PasswordCallback) {
+ PasswordCallback pc = (PasswordCallback) callback;
+ if (password == null) {
+ throw new IOException("No cached kerberos ticket found and no password supplied.");
+ }
+ pc.setPassword(password);
+ } else {
+ throw new UnsupportedCallbackException(callback, "Unrecognized Callback");
+ }
+ }
}
- }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/gss/GSSInputStream.java b/pgjdbc/src/main/java/org/postgresql/gss/GSSInputStream.java
index 2007b82..e45d711 100644
--- a/pgjdbc/src/main/java/org/postgresql/gss/GSSInputStream.java
+++ b/pgjdbc/src/main/java/org/postgresql/gss/GSSInputStream.java
@@ -5,69 +5,68 @@
package org.postgresql.gss;
+import java.io.IOException;
+import java.io.InputStream;
import org.ietf.jgss.GSSContext;
import org.ietf.jgss.GSSException;
import org.ietf.jgss.MessageProp;
-import java.io.IOException;
-import java.io.InputStream;
-
public class GSSInputStream extends InputStream {
- private final GSSContext gssContext;
- private final MessageProp messageProp;
- private final InputStream wrapped;
- byte [] unencrypted;
- int unencryptedPos;
- int unencryptedLength;
+ private final GSSContext gssContext;
+ private final MessageProp messageProp;
+ private final InputStream wrapped;
+ byte[] unencrypted;
+ int unencryptedPos;
+ int unencryptedLength;
- public GSSInputStream(InputStream wrapped, GSSContext gssContext, MessageProp messageProp) {
- this.wrapped = wrapped;
- this.gssContext = gssContext;
- this.messageProp = messageProp;
- }
+ public GSSInputStream(InputStream wrapped, GSSContext gssContext, MessageProp messageProp) {
+ this.wrapped = wrapped;
+ this.gssContext = gssContext;
+ this.messageProp = messageProp;
+ }
- @Override
- public int read() throws IOException {
- return 0;
- }
+ @Override
+ public int read() throws IOException {
+ return 0;
+ }
- @Override
- public int read(byte [] buffer, int pos, int len) throws IOException {
- byte[] int4Buf = new byte[4];
- int encryptedLength;
- int copyLength = 0;
+ @Override
+ public int read(byte[] buffer, int pos, int len) throws IOException {
+ byte[] int4Buf = new byte[4];
+ int encryptedLength;
+ int copyLength = 0;
- if ( unencryptedLength > 0 ) {
- copyLength = Math.min(len, unencryptedLength);
- System.arraycopy(unencrypted, unencryptedPos, buffer, pos, copyLength);
- unencryptedLength -= copyLength;
- unencryptedPos += copyLength;
- } else {
- if (wrapped.read(int4Buf, 0, 4) == 4 ) {
+ if (unencryptedLength > 0) {
+ copyLength = Math.min(len, unencryptedLength);
+ System.arraycopy(unencrypted, unencryptedPos, buffer, pos, copyLength);
+ unencryptedLength -= copyLength;
+ unencryptedPos += copyLength;
+ } else {
+ if (wrapped.read(int4Buf, 0, 4) == 4) {
- encryptedLength = (int4Buf[0] & 0xFF) << 24 | (int4Buf[1] & 0xFF) << 16 | (int4Buf[2] & 0xFF) << 8
- | int4Buf[3] & 0xFF;
+ encryptedLength = (int4Buf[0] & 0xFF) << 24 | (int4Buf[1] & 0xFF) << 16 | (int4Buf[2] & 0xFF) << 8
+ | int4Buf[3] & 0xFF;
- byte[] encryptedBuffer = new byte[encryptedLength];
- wrapped.read(encryptedBuffer, 0, encryptedLength);
+ byte[] encryptedBuffer = new byte[encryptedLength];
+ wrapped.read(encryptedBuffer, 0, encryptedLength);
- try {
- byte[] unencrypted = gssContext.unwrap(encryptedBuffer, 0, encryptedLength, messageProp);
- this.unencrypted = unencrypted;
- unencryptedLength = unencrypted.length;
- unencryptedPos = 0;
+ try {
+ byte[] unencrypted = gssContext.unwrap(encryptedBuffer, 0, encryptedLength, messageProp);
+ this.unencrypted = unencrypted;
+ unencryptedLength = unencrypted.length;
+ unencryptedPos = 0;
- copyLength = Math.min(len, unencrypted.length);
- System.arraycopy(unencrypted, unencryptedPos, buffer, pos, copyLength);
- unencryptedLength -= copyLength;
- unencryptedPos += copyLength;
+ copyLength = Math.min(len, unencrypted.length);
+ System.arraycopy(unencrypted, unencryptedPos, buffer, pos, copyLength);
+ unencryptedLength -= copyLength;
+ unencryptedPos += copyLength;
- } catch (GSSException e) {
- throw new IOException(e);
+ } catch (GSSException e) {
+ throw new IOException(e);
+ }
+ return copyLength;
+ }
}
return copyLength;
- }
}
- return copyLength;
- }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/gss/GSSOutputStream.java b/pgjdbc/src/main/java/org/postgresql/gss/GSSOutputStream.java
index 197ddb7..c3a4f31 100644
--- a/pgjdbc/src/main/java/org/postgresql/gss/GSSOutputStream.java
+++ b/pgjdbc/src/main/java/org/postgresql/gss/GSSOutputStream.java
@@ -5,81 +5,80 @@
package org.postgresql.gss;
+import java.io.IOException;
+import java.io.OutputStream;
import org.ietf.jgss.GSSContext;
import org.ietf.jgss.GSSException;
import org.ietf.jgss.MessageProp;
-import java.io.IOException;
-import java.io.OutputStream;
-
public class GSSOutputStream extends OutputStream {
- private final GSSContext gssContext;
- private final MessageProp messageProp;
- private final byte[] buffer;
- private final byte[] int4Buf = new byte[4];
- private int index;
- private final OutputStream wrapped;
+ private final GSSContext gssContext;
+ private final MessageProp messageProp;
+ private final byte[] buffer;
+ private final byte[] int4Buf = new byte[4];
+ private final OutputStream wrapped;
+ private int index;
- public GSSOutputStream(OutputStream out, GSSContext gssContext, MessageProp messageProp, int bufferSize) {
- wrapped = out;
- this.gssContext = gssContext;
- this.messageProp = messageProp;
- buffer = new byte[bufferSize];
- }
-
- @Override
- public void write(int b) throws IOException {
- buffer[index++] = (byte) b;
- if (index >= buffer.length) {
- flush();
+ public GSSOutputStream(OutputStream out, GSSContext gssContext, MessageProp messageProp, int bufferSize) {
+ wrapped = out;
+ this.gssContext = gssContext;
+ this.messageProp = messageProp;
+ buffer = new byte[bufferSize];
}
- }
- @Override
- public void write(byte[] buf) throws IOException {
- write(buf, 0, buf.length);
- }
-
- @Override
- public void write(byte[] b, int pos, int len) throws IOException {
- int max;
-
- while ( len > 0 ) {
- int roomToWrite = buffer.length - index;
- if ( len < roomToWrite ) {
- System.arraycopy(b, pos, buffer, index, len);
- index += len;
- len -= roomToWrite;
- } else {
- System.arraycopy(b, pos, buffer, index, roomToWrite);
- index += roomToWrite;
- len -= roomToWrite;
- }
- if (roomToWrite == 0) {
- flush();
- }
+ @Override
+ public void write(int b) throws IOException {
+ buffer[index++] = (byte) b;
+ if (index >= buffer.length) {
+ flush();
+ }
}
- }
- @Override
- public void flush() throws IOException {
- try {
- byte[] token = gssContext.wrap(buffer, 0, index, messageProp);
- sendInteger4Raw(token.length);
- wrapped.write(token, 0, token.length);
- index = 0;
- } catch ( GSSException ex ) {
- throw new IOException(ex);
+ @Override
+ public void write(byte[] buf) throws IOException {
+ write(buf, 0, buf.length);
}
- wrapped.flush();
- }
- private void sendInteger4Raw(int val) throws IOException {
- int4Buf[0] = (byte) (val >>> 24);
- int4Buf[1] = (byte) (val >>> 16);
- int4Buf[2] = (byte) (val >>> 8);
- int4Buf[3] = (byte) (val);
- wrapped.write(int4Buf);
- }
+ @Override
+ public void write(byte[] b, int pos, int len) throws IOException {
+ int max;
+
+ while (len > 0) {
+ int roomToWrite = buffer.length - index;
+ if (len < roomToWrite) {
+ System.arraycopy(b, pos, buffer, index, len);
+ index += len;
+ len -= roomToWrite;
+ } else {
+ System.arraycopy(b, pos, buffer, index, roomToWrite);
+ index += roomToWrite;
+ len -= roomToWrite;
+ }
+ if (roomToWrite == 0) {
+ flush();
+ }
+ }
+ }
+
+ @Override
+ public void flush() throws IOException {
+ try {
+ byte[] token = gssContext.wrap(buffer, 0, index, messageProp);
+ sendInteger4Raw(token.length);
+ wrapped.write(token, 0, token.length);
+ index = 0;
+ } catch (GSSException ex) {
+ throw new IOException(ex);
+ }
+ wrapped.flush();
+ }
+
+ private void sendInteger4Raw(int val) throws IOException {
+ int4Buf[0] = (byte) (val >>> 24);
+ int4Buf[1] = (byte) (val >>> 16);
+ int4Buf[2] = (byte) (val >>> 8);
+ int4Buf[3] = (byte) (val);
+ wrapped.write(int4Buf);
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/gss/GssAction.java b/pgjdbc/src/main/java/org/postgresql/gss/GssAction.java
index b1ad40f..7b4b65d 100644
--- a/pgjdbc/src/main/java/org/postgresql/gss/GssAction.java
+++ b/pgjdbc/src/main/java/org/postgresql/gss/GssAction.java
@@ -5,19 +5,6 @@
package org.postgresql.gss;
-import org.postgresql.core.PGStream;
-import org.postgresql.util.GT;
-import org.postgresql.util.PSQLException;
-import org.postgresql.util.PSQLState;
-import org.postgresql.util.ServerErrorMessage;
-
-import org.ietf.jgss.GSSContext;
-import org.ietf.jgss.GSSCredential;
-import org.ietf.jgss.GSSException;
-import org.ietf.jgss.GSSManager;
-import org.ietf.jgss.GSSName;
-import org.ietf.jgss.Oid;
-
import java.io.IOException;
import java.security.Principal;
import java.security.PrivilegedAction;
@@ -26,151 +13,161 @@ import java.util.Set;
import java.util.concurrent.Callable;
import java.util.logging.Level;
import java.util.logging.Logger;
-
import javax.security.auth.Subject;
+import org.ietf.jgss.GSSContext;
+import org.ietf.jgss.GSSCredential;
+import org.ietf.jgss.GSSException;
+import org.ietf.jgss.GSSManager;
+import org.ietf.jgss.GSSName;
+import org.ietf.jgss.Oid;
+import org.postgresql.core.PGStream;
+import org.postgresql.util.GT;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
+import org.postgresql.util.ServerErrorMessage;
class GssAction implements PrivilegedAction, Callable {
- private static final Logger LOGGER = Logger.getLogger(GssAction.class.getName());
- private final PGStream pgStream;
- private final String host;
- private final String kerberosServerName;
- private final String user;
- private final boolean useSpnego;
- private final Subject subject;
- private final boolean logServerErrorDetail;
+ private static final Logger LOGGER = Logger.getLogger(GssAction.class.getName());
+ private final PGStream pgStream;
+ private final String host;
+ private final String kerberosServerName;
+ private final String user;
+ private final boolean useSpnego;
+ private final Subject subject;
+ private final boolean logServerErrorDetail;
- GssAction(PGStream pgStream, Subject subject, String host, String user,
- String kerberosServerName, boolean useSpnego, boolean logServerErrorDetail) {
- this.pgStream = pgStream;
- this.subject = subject;
- this.host = host;
- this.user = user;
- this.kerberosServerName = kerberosServerName;
- this.useSpnego = useSpnego;
- this.logServerErrorDetail = logServerErrorDetail;
- }
-
- private static boolean hasSpnegoSupport(GSSManager manager) throws GSSException {
- Oid spnego = new Oid("1.3.6.1.5.5.2");
- Oid[] mechs = manager.getMechs();
-
- for (Oid mech : mechs) {
- if (mech.equals(spnego)) {
- return true;
- }
+ GssAction(PGStream pgStream, Subject subject, String host, String user,
+ String kerberosServerName, boolean useSpnego, boolean logServerErrorDetail) {
+ this.pgStream = pgStream;
+ this.subject = subject;
+ this.host = host;
+ this.user = user;
+ this.kerberosServerName = kerberosServerName;
+ this.useSpnego = useSpnego;
+ this.logServerErrorDetail = logServerErrorDetail;
}
- return false;
- }
+ private static boolean hasSpnegoSupport(GSSManager manager) throws GSSException {
+ Oid spnego = new Oid("1.3.6.1.5.5.2");
+ Oid[] mechs = manager.getMechs();
- @Override
- public Exception run() {
- try {
- GSSManager manager = GSSManager.getInstance();
- GSSCredential clientCreds = null;
- Oid[] desiredMechs = new Oid[1];
-
- //Try to get credential from subject first.
- GSSCredential gssCredential = null;
- if (subject != null) {
- Set<GSSCredential> gssCreds = subject.getPrivateCredentials(GSSCredential.class);
- if (gssCreds != null && !gssCreds.isEmpty()) {
- gssCredential = gssCreds.iterator().next();
- }
- }
-
- //If failed to get credential from subject,
- //then call createCredential to create one.
- if (gssCredential == null) {
- if (useSpnego && hasSpnegoSupport(manager)) {
- desiredMechs[0] = new Oid("1.3.6.1.5.5.2");
- } else {
- desiredMechs[0] = new Oid("1.2.840.113554.1.2.2");
- }
- String principalName = this.user;
- if (subject != null) {
- Set<Principal> principals = subject.getPrincipals();
- Iterator<Principal> principalIterator = principals.iterator();
-
- Principal principal = null;
- if (principalIterator.hasNext()) {
- principal = principalIterator.next();
- principalName = principal.getName();
- }
+ for (Oid mech : mechs) {
+ if (mech.equals(spnego)) {
+ return true;
+ }
}
- GSSName clientName = manager.createName(principalName, GSSName.NT_USER_NAME);
- clientCreds = manager.createCredential(clientName, 8 * 3600, desiredMechs,
- GSSCredential.INITIATE_ONLY);
- } else {
- desiredMechs[0] = new Oid("1.2.840.113554.1.2.2");
- clientCreds = gssCredential;
- }
-
- GSSName serverName =
- manager.createName(kerberosServerName + "@" + host, GSSName.NT_HOSTBASED_SERVICE);
-
- GSSContext secContext = manager.createContext(serverName, desiredMechs[0], clientCreds,
- GSSContext.DEFAULT_LIFETIME);
- secContext.requestMutualAuth(true);
-
- byte[] inToken = new byte[0];
- byte[] outToken = null;
-
- boolean established = false;
- while (!established) {
- outToken = secContext.initSecContext(inToken, 0, inToken.length);
-
- if (outToken != null) {
- LOGGER.log(Level.FINEST, " FE=> Password(GSS Authentication Token)");
-
- pgStream.sendChar('p');
- pgStream.sendInteger4(4 + outToken.length);
- pgStream.send(outToken);
- pgStream.flush();
- }
-
- if (!secContext.isEstablished()) {
- int response = pgStream.receiveChar();
- // Error
- switch (response) {
- case 'E':
- int elen = pgStream.receiveInteger4();
- ServerErrorMessage errorMsg
- = new ServerErrorMessage(pgStream.receiveErrorString(elen - 4));
-
- LOGGER.log(Level.FINEST, " <=BE ErrorMessage({0})", errorMsg);
-
- return new PSQLException(errorMsg, logServerErrorDetail);
- case 'R':
- LOGGER.log(Level.FINEST, " <=BE AuthenticationGSSContinue");
- int len = pgStream.receiveInteger4();
- int type = pgStream.receiveInteger4();
- // should check type = 8
- inToken = pgStream.receive(len - 8);
- break;
- default:
- // Unknown/unexpected message type.
- return new PSQLException(GT.tr("Protocol error. Session setup failed."),
- PSQLState.CONNECTION_UNABLE_TO_CONNECT);
- }
- } else {
- established = true;
- }
- }
-
- } catch (IOException e) {
- return e;
- } catch (GSSException gsse) {
- return new PSQLException(GT.tr("GSS Authentication failed"), PSQLState.CONNECTION_FAILURE,
- gsse);
+ return false;
}
- return null;
- }
- @Override
- public Exception call() throws Exception {
- return run();
- }
+ @Override
+ public Exception run() {
+ try {
+ GSSManager manager = GSSManager.getInstance();
+ GSSCredential clientCreds = null;
+ Oid[] desiredMechs = new Oid[1];
+
+ //Try to get credential from subject first.
+ GSSCredential gssCredential = null;
+ if (subject != null) {
+ Set<GSSCredential> gssCreds = subject.getPrivateCredentials(GSSCredential.class);
+ if (gssCreds != null && !gssCreds.isEmpty()) {
+ gssCredential = gssCreds.iterator().next();
+ }
+ }
+
+ //If failed to get credential from subject,
+ //then call createCredential to create one.
+ if (gssCredential == null) {
+ if (useSpnego && hasSpnegoSupport(manager)) {
+ desiredMechs[0] = new Oid("1.3.6.1.5.5.2");
+ } else {
+ desiredMechs[0] = new Oid("1.2.840.113554.1.2.2");
+ }
+ String principalName = this.user;
+ if (subject != null) {
+ Set<Principal> principals = subject.getPrincipals();
+ Iterator<Principal> principalIterator = principals.iterator();
+
+ Principal principal = null;
+ if (principalIterator.hasNext()) {
+ principal = principalIterator.next();
+ principalName = principal.getName();
+ }
+ }
+
+ GSSName clientName = manager.createName(principalName, GSSName.NT_USER_NAME);
+ clientCreds = manager.createCredential(clientName, 8 * 3600, desiredMechs,
+ GSSCredential.INITIATE_ONLY);
+ } else {
+ desiredMechs[0] = new Oid("1.2.840.113554.1.2.2");
+ clientCreds = gssCredential;
+ }
+
+ GSSName serverName =
+ manager.createName(kerberosServerName + "@" + host, GSSName.NT_HOSTBASED_SERVICE);
+
+ GSSContext secContext = manager.createContext(serverName, desiredMechs[0], clientCreds,
+ GSSContext.DEFAULT_LIFETIME);
+ secContext.requestMutualAuth(true);
+
+ byte[] inToken = new byte[0];
+ byte[] outToken = null;
+
+ boolean established = false;
+ while (!established) {
+ outToken = secContext.initSecContext(inToken, 0, inToken.length);
+
+ if (outToken != null) {
+ LOGGER.log(Level.FINEST, " FE=> Password(GSS Authentication Token)");
+
+ pgStream.sendChar('p');
+ pgStream.sendInteger4(4 + outToken.length);
+ pgStream.send(outToken);
+ pgStream.flush();
+ }
+
+ if (!secContext.isEstablished()) {
+ int response = pgStream.receiveChar();
+ // Error
+ switch (response) {
+ case 'E':
+ int elen = pgStream.receiveInteger4();
+ ServerErrorMessage errorMsg
+ = new ServerErrorMessage(pgStream.receiveErrorString(elen - 4));
+
+ LOGGER.log(Level.FINEST, " <=BE ErrorMessage({0})", errorMsg);
+
+ return new PSQLException(errorMsg, logServerErrorDetail);
+ case 'R':
+ LOGGER.log(Level.FINEST, " <=BE AuthenticationGSSContinue");
+ int len = pgStream.receiveInteger4();
+ int type = pgStream.receiveInteger4();
+ // should check type = 8
+ inToken = pgStream.receive(len - 8);
+ break;
+ default:
+ // Unknown/unexpected message type.
+ return new PSQLException(GT.tr("Protocol error. Session setup failed."),
+ PSQLState.CONNECTION_UNABLE_TO_CONNECT);
+ }
+ } else {
+ established = true;
+ }
+ }
+
+ } catch (IOException e) {
+ return e;
+ } catch (GSSException gsse) {
+ return new PSQLException(GT.tr("GSS Authentication failed"), PSQLState.CONNECTION_FAILURE,
+ gsse);
+ }
+ return null;
+ }
+
+ @Override
+ public Exception call() throws Exception {
+ return run();
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/gss/GssEncAction.java b/pgjdbc/src/main/java/org/postgresql/gss/GssEncAction.java
index 37cc0d8..525a730 100644
--- a/pgjdbc/src/main/java/org/postgresql/gss/GssEncAction.java
+++ b/pgjdbc/src/main/java/org/postgresql/gss/GssEncAction.java
@@ -5,18 +5,6 @@
package org.postgresql.gss;
-import org.postgresql.core.PGStream;
-import org.postgresql.util.GT;
-import org.postgresql.util.PSQLException;
-import org.postgresql.util.PSQLState;
-
-import org.ietf.jgss.GSSContext;
-import org.ietf.jgss.GSSCredential;
-import org.ietf.jgss.GSSException;
-import org.ietf.jgss.GSSManager;
-import org.ietf.jgss.GSSName;
-import org.ietf.jgss.Oid;
-
import java.io.IOException;
import java.security.Principal;
import java.security.PrivilegedAction;
@@ -25,133 +13,142 @@ import java.util.Set;
import java.util.concurrent.Callable;
import java.util.logging.Level;
import java.util.logging.Logger;
-
import javax.security.auth.Subject;
+import org.ietf.jgss.GSSContext;
+import org.ietf.jgss.GSSCredential;
+import org.ietf.jgss.GSSException;
+import org.ietf.jgss.GSSManager;
+import org.ietf.jgss.GSSName;
+import org.ietf.jgss.Oid;
+import org.postgresql.core.PGStream;
+import org.postgresql.util.GT;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
public class GssEncAction implements PrivilegedAction, Callable {
- private static final Logger LOGGER = Logger.getLogger(GssAction.class.getName());
- private final PGStream pgStream;
- private final String host;
- private final String user;
- private final String kerberosServerName;
- private final boolean useSpnego;
- private final Subject subject;
- private final boolean logServerErrorDetail;
+ private static final Logger LOGGER = Logger.getLogger(GssAction.class.getName());
+ private final PGStream pgStream;
+ private final String host;
+ private final String user;
+ private final String kerberosServerName;
+ private final boolean useSpnego;
+ private final Subject subject;
+ private final boolean logServerErrorDetail;
- public GssEncAction(PGStream pgStream, Subject subject,
- String host, String user,
- String kerberosServerName, boolean useSpnego, boolean logServerErrorDetail) {
- this.pgStream = pgStream;
- this.subject = subject;
- this.host = host;
- this.user = user;
- this.kerberosServerName = kerberosServerName;
- this.useSpnego = useSpnego;
- this.logServerErrorDetail = logServerErrorDetail;
- }
-
- private static boolean hasSpnegoSupport(GSSManager manager) throws GSSException {
- Oid spnego = new Oid("1.3.6.1.5.5.2");
- Oid[] mechs = manager.getMechs();
-
- for (Oid mech : mechs) {
- if (mech.equals(spnego)) {
- return true;
- }
+ public GssEncAction(PGStream pgStream, Subject subject,
+ String host, String user,
+ String kerberosServerName, boolean useSpnego, boolean logServerErrorDetail) {
+ this.pgStream = pgStream;
+ this.subject = subject;
+ this.host = host;
+ this.user = user;
+ this.kerberosServerName = kerberosServerName;
+ this.useSpnego = useSpnego;
+ this.logServerErrorDetail = logServerErrorDetail;
}
- return false;
- }
+ private static boolean hasSpnegoSupport(GSSManager manager) throws GSSException {
+ Oid spnego = new Oid("1.3.6.1.5.5.2");
+ Oid[] mechs = manager.getMechs();
- @Override
- public Exception run() {
- try {
- GSSManager manager = GSSManager.getInstance();
- GSSCredential clientCreds = null;
- Oid[] desiredMechs = new Oid[1];
-
- //Try to get credential from subject first.
- GSSCredential gssCredential = null;
- if (subject != null) {
- Set<GSSCredential> gssCreds = subject.getPrivateCredentials(GSSCredential.class);
- if (gssCreds != null && !gssCreds.isEmpty()) {
- gssCredential = gssCreds.iterator().next();
- }
- }
-
- //If failed to get credential from subject,
- //then call createCredential to create one.
- if (gssCredential == null) {
- if (useSpnego && hasSpnegoSupport(manager)) {
- desiredMechs[0] = new Oid("1.3.6.1.5.5.2");
- } else {
- desiredMechs[0] = new Oid("1.2.840.113554.1.2.2");
- }
- String principalName = this.user;
- if (subject != null) {
- Set<Principal> principals = subject.getPrincipals();
- Iterator<Principal> principalIterator = principals.iterator();
-
- Principal principal = null;
- if (principalIterator.hasNext()) {
- principal = principalIterator.next();
- principalName = principal.getName();
- }
+ for (Oid mech : mechs) {
+ if (mech.equals(spnego)) {
+ return true;
+ }
}
- GSSName clientName = manager.createName(principalName, GSSName.NT_USER_NAME);
- clientCreds = manager.createCredential(clientName, 8 * 3600, desiredMechs,
- GSSCredential.INITIATE_ONLY);
- } else {
- desiredMechs[0] = new Oid("1.2.840.113554.1.2.2");
- clientCreds = gssCredential;
- }
- GSSName serverName =
- manager.createName(kerberosServerName + "@" + host, GSSName.NT_HOSTBASED_SERVICE);
-
- GSSContext secContext = manager.createContext(serverName, desiredMechs[0], clientCreds,
- GSSContext.DEFAULT_LIFETIME);
- secContext.requestMutualAuth(true);
- secContext.requestConf(true);
- secContext.requestInteg(true);
-
- byte[] inToken = new byte[0];
- byte[] outToken = null;
-
- boolean established = false;
- while (!established) {
- outToken = secContext.initSecContext(inToken, 0, inToken.length);
-
- if (outToken != null) {
- LOGGER.log(Level.FINEST, " FE=> Password(GSS Authentication Token)");
-
- pgStream.sendInteger4(outToken.length);
- pgStream.send(outToken);
- pgStream.flush();
- }
-
- if (!secContext.isEstablished()) {
- int len = pgStream.receiveInteger4();
- // should check type = 8
- inToken = pgStream.receive(len);
- } else {
- established = true;
- pgStream.setSecContext(secContext);
- }
- }
-
- } catch (IOException e) {
- return e;
- } catch (GSSException gsse) {
- return new PSQLException(GT.tr("GSS Authentication failed"), PSQLState.CONNECTION_FAILURE,
- gsse);
+ return false;
}
- return null;
- }
+ @Override
+ public Exception run() {
+ try {
+ GSSManager manager = GSSManager.getInstance();
+ GSSCredential clientCreds = null;
+ Oid[] desiredMechs = new Oid[1];
- @Override
- public Exception call() throws Exception {
- return run();
- }
+ //Try to get credential from subject first.
+ GSSCredential gssCredential = null;
+ if (subject != null) {
+ Set<GSSCredential> gssCreds = subject.getPrivateCredentials(GSSCredential.class);
+ if (gssCreds != null && !gssCreds.isEmpty()) {
+ gssCredential = gssCreds.iterator().next();
+ }
+ }
+
+ //If failed to get credential from subject,
+ //then call createCredential to create one.
+ if (gssCredential == null) {
+ if (useSpnego && hasSpnegoSupport(manager)) {
+ desiredMechs[0] = new Oid("1.3.6.1.5.5.2");
+ } else {
+ desiredMechs[0] = new Oid("1.2.840.113554.1.2.2");
+ }
+ String principalName = this.user;
+ if (subject != null) {
+ Set<Principal> principals = subject.getPrincipals();
+ Iterator<Principal> principalIterator = principals.iterator();
+
+ Principal principal = null;
+ if (principalIterator.hasNext()) {
+ principal = principalIterator.next();
+ principalName = principal.getName();
+ }
+ }
+
+ GSSName clientName = manager.createName(principalName, GSSName.NT_USER_NAME);
+ clientCreds = manager.createCredential(clientName, 8 * 3600, desiredMechs,
+ GSSCredential.INITIATE_ONLY);
+ } else {
+ desiredMechs[0] = new Oid("1.2.840.113554.1.2.2");
+ clientCreds = gssCredential;
+ }
+ GSSName serverName =
+ manager.createName(kerberosServerName + "@" + host, GSSName.NT_HOSTBASED_SERVICE);
+
+ GSSContext secContext = manager.createContext(serverName, desiredMechs[0], clientCreds,
+ GSSContext.DEFAULT_LIFETIME);
+ secContext.requestMutualAuth(true);
+ secContext.requestConf(true);
+ secContext.requestInteg(true);
+
+ byte[] inToken = new byte[0];
+ byte[] outToken = null;
+
+ boolean established = false;
+ while (!established) {
+ outToken = secContext.initSecContext(inToken, 0, inToken.length);
+
+ if (outToken != null) {
+ LOGGER.log(Level.FINEST, " FE=> Password(GSS Authentication Token)");
+
+ pgStream.sendInteger4(outToken.length);
+ pgStream.send(outToken);
+ pgStream.flush();
+ }
+
+ if (!secContext.isEstablished()) {
+ int len = pgStream.receiveInteger4();
+ // should check type = 8
+ inToken = pgStream.receive(len);
+ } else {
+ established = true;
+ pgStream.setSecContext(secContext);
+ }
+ }
+
+ } catch (IOException e) {
+ return e;
+ } catch (GSSException gsse) {
+ return new PSQLException(GT.tr("GSS Authentication failed"), PSQLState.CONNECTION_FAILURE,
+ gsse);
+ }
+
+ return null;
+ }
+
+ @Override
+ public Exception call() throws Exception {
+ return run();
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/gss/MakeGSS.java b/pgjdbc/src/main/java/org/postgresql/gss/MakeGSS.java
index a548275..57b2181 100644
--- a/pgjdbc/src/main/java/org/postgresql/gss/MakeGSS.java
+++ b/pgjdbc/src/main/java/org/postgresql/gss/MakeGSS.java
@@ -5,14 +5,6 @@
package org.postgresql.gss;
-import org.postgresql.PGProperty;
-import org.postgresql.core.PGStream;
-import org.postgresql.util.GT;
-import org.postgresql.util.PSQLException;
-import org.postgresql.util.PSQLState;
-
-import org.ietf.jgss.GSSCredential;
-
import java.io.IOException;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
@@ -22,160 +14,166 @@ import java.util.Set;
import java.util.concurrent.Callable;
import java.util.logging.Level;
import java.util.logging.Logger;
-
import javax.security.auth.Subject;
import javax.security.auth.login.LoginContext;
+import org.ietf.jgss.GSSCredential;
+import org.postgresql.PGProperty;
+import org.postgresql.core.PGStream;
+import org.postgresql.util.GT;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
public class MakeGSS {
- private static final Logger LOGGER = Logger.getLogger(MakeGSS.class.getName());
- private static final MethodHandle SUBJECT_CURRENT;
- private static final MethodHandle ACCESS_CONTROLLER_GET_CONTEXT;
- private static final MethodHandle SUBJECT_GET_SUBJECT;
- // Java <18
- private static final MethodHandle SUBJECT_DO_AS;
- // Java 18+, see https://bugs.openjdk.org/browse/JDK-8267108
- private static final MethodHandle SUBJECT_CALL_AS;
+ private static final Logger LOGGER = Logger.getLogger(MakeGSS.class.getName());
+ private static final MethodHandle SUBJECT_CURRENT;
+ private static final MethodHandle ACCESS_CONTROLLER_GET_CONTEXT;
+ private static final MethodHandle SUBJECT_GET_SUBJECT;
+ // Java <18
+ private static final MethodHandle SUBJECT_DO_AS;
+ // Java 18+, see https://bugs.openjdk.org/browse/JDK-8267108
+ private static final MethodHandle SUBJECT_CALL_AS;
- static {
- MethodHandle subjectCurrent = null;
- try {
- subjectCurrent = MethodHandles.lookup()
- .findStatic(Subject.class, "current", MethodType.methodType(Subject.class));
- } catch (NoSuchMethodException | IllegalAccessException ignore) {
- // E.g. pre Java 18
- }
- SUBJECT_CURRENT = subjectCurrent;
-
- MethodHandle accessControllerGetContext = null;
- MethodHandle subjectGetSubject = null;
-
- try {
- Class<?> accessControllerClass = Class.forName("java.security.AccessController");
- Class<?> accessControlContextClass =
- Class.forName("java.security.AccessControlContext");
- accessControllerGetContext = MethodHandles.lookup()
- .findStatic(accessControllerClass, "getContext",
- MethodType.methodType(accessControlContextClass));
- subjectGetSubject = MethodHandles.lookup()
- .findStatic(Subject.class, "getSubject",
- MethodType.methodType(Subject.class, accessControlContextClass));
- } catch (NoSuchMethodException | IllegalAccessException | ClassNotFoundException ignore) {
- // E.g. pre Java 18+
- }
-
- ACCESS_CONTROLLER_GET_CONTEXT = accessControllerGetContext;
- SUBJECT_GET_SUBJECT = subjectGetSubject;
-
- MethodHandle subjectDoAs = null;
- try {
- subjectDoAs = MethodHandles.lookup().findStatic(Subject.class, "doAs",
- MethodType.methodType(Object.class, Subject.class, PrivilegedAction.class));
- } catch (NoSuchMethodException | IllegalAccessException ignore) {
- }
- SUBJECT_DO_AS = subjectDoAs;
-
- MethodHandle subjectCallAs = null;
- try {
- subjectCallAs = MethodHandles.lookup().findStatic(Subject.class, "callAs",
- MethodType.methodType(Object.class, Subject.class, Callable.class));
- } catch (NoSuchMethodException | IllegalAccessException ignore) {
- }
- SUBJECT_CALL_AS = subjectCallAs;
- }
-
- /**
- * Use {@code Subject.current()} in Java 18+, and
- * {@code Subject.getSubject(AccessController.getContext())} in Java before 18.
- * @return current Subject or null
- */
- @SuppressWarnings("deprecation")
- private static Subject getCurrentSubject() {
- try {
- if (SUBJECT_CURRENT != null) {
- return (Subject) SUBJECT_CURRENT.invokeExact();
- }
- if (SUBJECT_GET_SUBJECT == null || ACCESS_CONTROLLER_GET_CONTEXT == null) {
- return null;
- }
- return (Subject) SUBJECT_GET_SUBJECT.invoke(
- ACCESS_CONTROLLER_GET_CONTEXT.invoke()
- );
- } catch (Throwable e) {
- if (e instanceof RuntimeException) {
- throw (RuntimeException) e;
- }
- if (e instanceof Error) {
- throw (Error) e;
- }
- throw new RuntimeException(e);
- }
- }
-
- public static void authenticate(boolean encrypted,
- PGStream pgStream, String host, String user, char [] password,
- String jaasApplicationName, String kerberosServerName,
- boolean useSpnego, boolean jaasLogin,
- boolean logServerErrorDetail)
- throws IOException, PSQLException {
- LOGGER.log(Level.FINEST, " <=BE AuthenticationReqGSS");
-
- if (jaasApplicationName == null) {
- jaasApplicationName = PGProperty.JAAS_APPLICATION_NAME.getDefaultValue();
- }
- if (kerberosServerName == null) {
- kerberosServerName = "postgres";
- }
-
- Exception result;
- try {
- boolean performAuthentication = jaasLogin;
-
- //Check if we can get credential from subject to avoid login.
- Subject sub = getCurrentSubject();
- if (sub != null) {
- Set<GSSCredential> gssCreds = sub.getPrivateCredentials(GSSCredential.class);
- if (gssCreds != null && !gssCreds.isEmpty()) {
- performAuthentication = false;
+ static {
+ MethodHandle subjectCurrent = null;
+ try {
+ subjectCurrent = MethodHandles.lookup()
+ .findStatic(Subject.class, "current", MethodType.methodType(Subject.class));
+ } catch (NoSuchMethodException | IllegalAccessException ignore) {
+ // E.g. pre Java 18
}
- }
- if (performAuthentication) {
- LoginContext lc = new LoginContext(jaasApplicationName, new GSSCallbackHandler(user, password));
- lc.login();
- sub = lc.getSubject();
- }
+ SUBJECT_CURRENT = subjectCurrent;
- PrivilegedAction action;
- if ( encrypted ) {
- action = new GssEncAction(pgStream, sub, host, user,
- kerberosServerName, useSpnego, logServerErrorDetail);
- } else {
- action = new GssAction(pgStream, sub, host, user,
- kerberosServerName, useSpnego, logServerErrorDetail);
- }
- @SuppressWarnings({"cast.unsafe", "assignment"})
- Subject subject = sub;
- if (SUBJECT_DO_AS != null) {
- result = (Exception) SUBJECT_DO_AS.invoke(subject, action);
- } else if (SUBJECT_CALL_AS != null) {
- result = (Exception) SUBJECT_CALL_AS.invoke(subject, action);
- } else {
- throw new PSQLException(
- GT.tr("Neither Subject.doAs (Java before 18) nor Subject.callAs (Java 18+) method found"),
- PSQLState.OBJECT_NOT_IN_STATE);
- }
- } catch (Throwable e) {
- throw new PSQLException(GT.tr("GSS Authentication failed"), PSQLState.CONNECTION_FAILURE, e);
+ MethodHandle accessControllerGetContext = null;
+ MethodHandle subjectGetSubject = null;
+
+ try {
+ Class<?> accessControllerClass = Class.forName("java.security.AccessController");
+ Class<?> accessControlContextClass =
+ Class.forName("java.security.AccessControlContext");
+ accessControllerGetContext = MethodHandles.lookup()
+ .findStatic(accessControllerClass, "getContext",
+ MethodType.methodType(accessControlContextClass));
+ subjectGetSubject = MethodHandles.lookup()
+ .findStatic(Subject.class, "getSubject",
+ MethodType.methodType(Subject.class, accessControlContextClass));
+ } catch (NoSuchMethodException | IllegalAccessException | ClassNotFoundException ignore) {
+ // E.g. pre Java 18+
+ }
+
+ ACCESS_CONTROLLER_GET_CONTEXT = accessControllerGetContext;
+ SUBJECT_GET_SUBJECT = subjectGetSubject;
+
+ MethodHandle subjectDoAs = null;
+ try {
+ subjectDoAs = MethodHandles.lookup().findStatic(Subject.class, "doAs",
+ MethodType.methodType(Object.class, Subject.class, PrivilegedAction.class));
+ } catch (NoSuchMethodException | IllegalAccessException ignore) {
+ }
+ SUBJECT_DO_AS = subjectDoAs;
+
+ MethodHandle subjectCallAs = null;
+ try {
+ subjectCallAs = MethodHandles.lookup().findStatic(Subject.class, "callAs",
+ MethodType.methodType(Object.class, Subject.class, Callable.class));
+ } catch (NoSuchMethodException | IllegalAccessException ignore) {
+ }
+ SUBJECT_CALL_AS = subjectCallAs;
}
- if (result instanceof IOException) {
- throw (IOException) result;
- } else if (result instanceof PSQLException) {
- throw (PSQLException) result;
- } else if (result != null) {
- throw new PSQLException(GT.tr("GSS Authentication failed"), PSQLState.CONNECTION_FAILURE,
- result);
+ /**
+ * Use {@code Subject.current()} in Java 18+, and
+ * {@code Subject.getSubject(AccessController.getContext())} in Java before 18.
+ *
+ * @return current Subject or null
+ */
+ @SuppressWarnings("deprecation")
+ private static Subject getCurrentSubject() {
+ try {
+ if (SUBJECT_CURRENT != null) {
+ return (Subject) SUBJECT_CURRENT.invokeExact();
+ }
+ if (SUBJECT_GET_SUBJECT == null || ACCESS_CONTROLLER_GET_CONTEXT == null) {
+ return null;
+ }
+ return (Subject) SUBJECT_GET_SUBJECT.invoke(
+ ACCESS_CONTROLLER_GET_CONTEXT.invoke()
+ );
+ } catch (Throwable e) {
+ if (e instanceof RuntimeException) {
+ throw (RuntimeException) e;
+ }
+ if (e instanceof Error) {
+ throw (Error) e;
+ }
+ throw new RuntimeException(e);
+ }
}
- }
+ public static void authenticate(boolean encrypted,
+ PGStream pgStream, String host, String user, char[] password,
+ String jaasApplicationName, String kerberosServerName,
+ boolean useSpnego, boolean jaasLogin,
+ boolean logServerErrorDetail)
+ throws IOException, PSQLException {
+ LOGGER.log(Level.FINEST, " <=BE AuthenticationReqGSS");
+
+ if (jaasApplicationName == null) {
+ jaasApplicationName = PGProperty.JAAS_APPLICATION_NAME.getDefaultValue();
+ }
+ if (kerberosServerName == null) {
+ kerberosServerName = "postgres";
+ }
+
+ Exception result;
+ try {
+ boolean performAuthentication = jaasLogin;
+
+ //Check if we can get credential from subject to avoid login.
+ Subject sub = getCurrentSubject();
+ if (sub != null) {
+ Set<GSSCredential> gssCreds = sub.getPrivateCredentials(GSSCredential.class);
+ if (gssCreds != null && !gssCreds.isEmpty()) {
+ performAuthentication = false;
+ }
+ }
+ if (performAuthentication) {
+ LoginContext lc = new LoginContext(jaasApplicationName, new GSSCallbackHandler(user, password));
+ lc.login();
+ sub = lc.getSubject();
+ }
+
+ PrivilegedAction<Exception> action;
+ if (encrypted) {
+ action = new GssEncAction(pgStream, sub, host, user,
+ kerberosServerName, useSpnego, logServerErrorDetail);
+ } else {
+ action = new GssAction(pgStream, sub, host, user,
+ kerberosServerName, useSpnego, logServerErrorDetail);
+ }
+ @SuppressWarnings({"cast.unsafe", "assignment"})
+ Subject subject = sub;
+ if (SUBJECT_DO_AS != null) {
+ result = (Exception) SUBJECT_DO_AS.invoke(subject, action);
+ } else if (SUBJECT_CALL_AS != null) {
+ result = (Exception) SUBJECT_CALL_AS.invoke(subject, action);
+ } else {
+ throw new PSQLException(
+ GT.tr("Neither Subject.doAs (Java before 18) nor Subject.callAs (Java 18+) method found"),
+ PSQLState.OBJECT_NOT_IN_STATE);
+ }
+ } catch (Throwable e) {
+ throw new PSQLException(GT.tr("GSS Authentication failed"), PSQLState.CONNECTION_FAILURE, e);
+ }
+
+ if (result instanceof IOException) {
+ throw (IOException) result;
+ } else if (result instanceof PSQLException) {
+ throw (PSQLException) result;
+ } else if (result != null) {
+ throw new PSQLException(GT.tr("GSS Authentication failed"), PSQLState.CONNECTION_FAILURE,
+ result);
+ }
+
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/hostchooser/CandidateHost.java b/pgjdbc/src/main/java/org/postgresql/hostchooser/CandidateHost.java
index b0303e3..6f44165 100644
--- a/pgjdbc/src/main/java/org/postgresql/hostchooser/CandidateHost.java
+++ b/pgjdbc/src/main/java/org/postgresql/hostchooser/CandidateHost.java
@@ -11,11 +11,11 @@ import org.postgresql.util.HostSpec;
* Candidate host to be connected.
*/
public class CandidateHost {
- public final HostSpec hostSpec;
- public final HostRequirement targetServerType;
+ public final HostSpec hostSpec;
+ public final HostRequirement targetServerType;
- public CandidateHost(HostSpec hostSpec, HostRequirement targetServerType) {
- this.hostSpec = hostSpec;
- this.targetServerType = targetServerType;
- }
+ public CandidateHost(HostSpec hostSpec, HostRequirement targetServerType) {
+ this.hostSpec = hostSpec;
+ this.targetServerType = targetServerType;
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/hostchooser/GlobalHostStatusTracker.java b/pgjdbc/src/main/java/org/postgresql/hostchooser/GlobalHostStatusTracker.java
index d8a93b7..9b7f0d5 100644
--- a/pgjdbc/src/main/java/org/postgresql/hostchooser/GlobalHostStatusTracker.java
+++ b/pgjdbc/src/main/java/org/postgresql/hostchooser/GlobalHostStatusTracker.java
@@ -18,67 +18,67 @@ import java.util.Map;
*/
@SuppressWarnings("try")
public class GlobalHostStatusTracker {
- private static final Map<HostSpec, HostSpecStatus> hostStatusMap =
- new HashMap<>();
- private static final ResourceLock lock = new ResourceLock();
+ private static final Map<HostSpec, HostSpecStatus> hostStatusMap =
+ new HashMap<>();
+ private static final ResourceLock lock = new ResourceLock();
- /**
- * Store the actual observed host status.
- *
- * @param hostSpec The host whose status is known.
- * @param hostStatus Latest known status for the host.
- */
- public static void reportHostStatus(HostSpec hostSpec, HostStatus hostStatus) {
- long now = System.nanoTime() / 1000000;
- try (ResourceLock ignore = lock.obtain()) {
- HostSpecStatus hostSpecStatus = hostStatusMap.get(hostSpec);
- if (hostSpecStatus == null) {
- hostSpecStatus = new HostSpecStatus(hostSpec);
- hostStatusMap.put(hostSpec, hostSpecStatus);
- }
- hostSpecStatus.status = hostStatus;
- hostSpecStatus.lastUpdated = now;
- }
- }
-
- /**
- * Returns a list of candidate hosts that have the required targetServerType.
- *
- * @param hostSpecs The potential list of hosts.
- * @param targetServerType The required target server type.
- * @param hostRecheckMillis How stale information is allowed.
- * @return candidate hosts to connect to.
- */
- static List<HostSpec> getCandidateHosts(HostSpec[] hostSpecs,
- HostRequirement targetServerType, long hostRecheckMillis) {
- List<HostSpec> candidates = new ArrayList<>(hostSpecs.length);
- long latestAllowedUpdate = System.nanoTime() / 1000000 - hostRecheckMillis;
- try (ResourceLock ignore = lock.obtain()) {
- for (HostSpec hostSpec : hostSpecs) {
- HostSpecStatus hostInfo = hostStatusMap.get(hostSpec);
- // candidates are nodes we do not know about and the nodes with correct type
- if (hostInfo == null
- || hostInfo.lastUpdated < latestAllowedUpdate
- || targetServerType.allowConnectingTo(hostInfo.status)) {
- candidates.add(hostSpec);
+ /**
+ * Store the actual observed host status.
+ *
+ * @param hostSpec The host whose status is known.
+ * @param hostStatus Latest known status for the host.
+ */
+ public static void reportHostStatus(HostSpec hostSpec, HostStatus hostStatus) {
+ long now = System.nanoTime() / 1000000;
+ try (ResourceLock ignore = lock.obtain()) {
+ HostSpecStatus hostSpecStatus = hostStatusMap.get(hostSpec);
+ if (hostSpecStatus == null) {
+ hostSpecStatus = new HostSpecStatus(hostSpec);
+ hostStatusMap.put(hostSpec, hostSpecStatus);
+ }
+ hostSpecStatus.status = hostStatus;
+ hostSpecStatus.lastUpdated = now;
}
- }
- }
- return candidates;
- }
-
- static class HostSpecStatus {
- final HostSpec host;
- HostStatus status;
- long lastUpdated;
-
- HostSpecStatus(HostSpec host) {
- this.host = host;
}
- @Override
- public String toString() {
- return host.toString() + '=' + status;
+ /**
+ * Returns a list of candidate hosts that have the required targetServerType.
+ *
+ * @param hostSpecs The potential list of hosts.
+ * @param targetServerType The required target server type.
+ * @param hostRecheckMillis How stale information is allowed.
+ * @return candidate hosts to connect to.
+ */
+ static List<HostSpec> getCandidateHosts(HostSpec[] hostSpecs,
+ HostRequirement targetServerType, long hostRecheckMillis) {
+ List<HostSpec> candidates = new ArrayList<>(hostSpecs.length);
+ long latestAllowedUpdate = System.nanoTime() / 1000000 - hostRecheckMillis;
+ try (ResourceLock ignore = lock.obtain()) {
+ for (HostSpec hostSpec : hostSpecs) {
+ HostSpecStatus hostInfo = hostStatusMap.get(hostSpec);
+ // candidates are nodes we do not know about and the nodes with correct type
+ if (hostInfo == null
+ || hostInfo.lastUpdated < latestAllowedUpdate
+ || targetServerType.allowConnectingTo(hostInfo.status)) {
+ candidates.add(hostSpec);
+ }
+ }
+ }
+ return candidates;
+ }
+
+ static class HostSpecStatus {
+ final HostSpec host;
+ HostStatus status;
+ long lastUpdated;
+
+ HostSpecStatus(HostSpec host) {
+ this.host = host;
+ }
+
+ @Override
+ public String toString() {
+ return host.toString() + '=' + status;
+ }
}
- }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/hostchooser/HostChooser.java b/pgjdbc/src/main/java/org/postgresql/hostchooser/HostChooser.java
index a506b7b..862cdf3 100644
--- a/pgjdbc/src/main/java/org/postgresql/hostchooser/HostChooser.java
+++ b/pgjdbc/src/main/java/org/postgresql/hostchooser/HostChooser.java
@@ -11,11 +11,11 @@ import java.util.Iterator;
* Lists connections in preferred order.
*/
public interface HostChooser extends Iterable<CandidateHost> {
- /**
- * Lists connection hosts in preferred order.
- *
- * @return connection hosts in preferred order.
- */
- @Override
- Iterator<CandidateHost> iterator();
+ /**
+ * Lists connection hosts in preferred order.
+ *
+ * @return connection hosts in preferred order.
+ */
+ @Override
+ Iterator<CandidateHost> iterator();
}
diff --git a/pgjdbc/src/main/java/org/postgresql/hostchooser/HostChooserFactory.java b/pgjdbc/src/main/java/org/postgresql/hostchooser/HostChooserFactory.java
index 4099fa0..bd6c7b5 100644
--- a/pgjdbc/src/main/java/org/postgresql/hostchooser/HostChooserFactory.java
+++ b/pgjdbc/src/main/java/org/postgresql/hostchooser/HostChooserFactory.java
@@ -14,11 +14,11 @@ import java.util.Properties;
*/
public class HostChooserFactory {
- public static HostChooser createHostChooser(HostSpec[] hostSpecs,
- HostRequirement targetServerType, Properties info) {
- if (hostSpecs.length == 1) {
- return new SingleHostChooser(hostSpecs[0], targetServerType);
+ public static HostChooser createHostChooser(HostSpec[] hostSpecs,
+ HostRequirement targetServerType, Properties info) {
+ if (hostSpecs.length == 1) {
+ return new SingleHostChooser(hostSpecs[0], targetServerType);
+ }
+ return new MultiHostChooser(hostSpecs, targetServerType, info);
}
- return new MultiHostChooser(hostSpecs, targetServerType, info);
- }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/hostchooser/HostRequirement.java b/pgjdbc/src/main/java/org/postgresql/hostchooser/HostRequirement.java
index 666bb9f..bef9b68 100644
--- a/pgjdbc/src/main/java/org/postgresql/hostchooser/HostRequirement.java
+++ b/pgjdbc/src/main/java/org/postgresql/hostchooser/HostRequirement.java
@@ -9,68 +9,68 @@ package org.postgresql.hostchooser;
* Describes the required server type.
*/
public enum HostRequirement {
- any {
- @Override
- public boolean allowConnectingTo(HostStatus status) {
- return status != HostStatus.ConnectFail;
- }
- },
- /**
- * @deprecated we no longer use the terms master or slave in the driver, or the PostgreSQL
- * project.
- */
- @Deprecated
- master {
- @Override
- public boolean allowConnectingTo(HostStatus status) {
- return primary.allowConnectingTo(status);
- }
- },
- primary {
- @Override
- public boolean allowConnectingTo(HostStatus status) {
- return status == HostStatus.Primary || status == HostStatus.ConnectOK;
- }
- },
- secondary {
- @Override
- public boolean allowConnectingTo(HostStatus status) {
- return status == HostStatus.Secondary || status == HostStatus.ConnectOK;
- }
- },
- preferSecondary {
- @Override
- public boolean allowConnectingTo(HostStatus status) {
- return status != HostStatus.ConnectFail;
- }
- },
- preferPrimary {
- @Override
- public boolean allowConnectingTo(HostStatus status) {
- return status != HostStatus.ConnectFail;
- }
- };
+ any {
+ @Override
+ public boolean allowConnectingTo(HostStatus status) {
+ return status != HostStatus.ConnectFail;
+ }
+ },
+ /**
+ * @deprecated we no longer use the terms master or slave in the driver, or the PostgreSQL
+ * project.
+ */
+ @Deprecated
+ master {
+ @Override
+ public boolean allowConnectingTo(HostStatus status) {
+ return primary.allowConnectingTo(status);
+ }
+ },
+ primary {
+ @Override
+ public boolean allowConnectingTo(HostStatus status) {
+ return status == HostStatus.Primary || status == HostStatus.ConnectOK;
+ }
+ },
+ secondary {
+ @Override
+ public boolean allowConnectingTo(HostStatus status) {
+ return status == HostStatus.Secondary || status == HostStatus.ConnectOK;
+ }
+ },
+ preferSecondary {
+ @Override
+ public boolean allowConnectingTo(HostStatus status) {
+ return status != HostStatus.ConnectFail;
+ }
+ },
+ preferPrimary {
+ @Override
+ public boolean allowConnectingTo(HostStatus status) {
+ return status != HostStatus.ConnectFail;
+ }
+ };
- public abstract boolean allowConnectingTo(HostStatus status);
+ /**
+ * <p>The postgreSQL project has decided not to use the term slave to refer to alternate servers.
+ * secondary or standby is preferred. We have arbitrarily chosen secondary.
+ * As of Jan 2018 in order not to break existing code we are going to accept both slave or
+ * secondary for names of alternate servers.
+ *
+ * <p>The current policy is to keep accepting this silently but not document slave, or slave preferSlave</p>
+ *
+ * <p>As of Jul 2018 silently deprecate the use of the word master as well</p>
+ *
+ * @param targetServerType the value of {@code targetServerType} connection property
+ * @return HostRequirement
+ */
- /**
- * <p>The postgreSQL project has decided not to use the term slave to refer to alternate servers.
- * secondary or standby is preferred. We have arbitrarily chosen secondary.
- * As of Jan 2018 in order not to break existing code we are going to accept both slave or
- * secondary for names of alternate servers.
- *
- * <p>The current policy is to keep accepting this silently but not document slave, or slave preferSlave</p>
- *
- * <p>As of Jul 2018 silently deprecate the use of the word master as well</p>
- *
- * @param targetServerType the value of {@code targetServerType} connection property
- * @return HostRequirement
- */
+ public static HostRequirement getTargetServerType(String targetServerType) {
- public static HostRequirement getTargetServerType(String targetServerType) {
+ String allowSlave = targetServerType.replace("lave", "econdary").replace("master", "primary");
+ return valueOf(allowSlave);
+ }
- String allowSlave = targetServerType.replace("lave", "econdary").replace("master", "primary");
- return valueOf(allowSlave);
- }
+ public abstract boolean allowConnectingTo(HostStatus status);
}
diff --git a/pgjdbc/src/main/java/org/postgresql/hostchooser/HostStatus.java b/pgjdbc/src/main/java/org/postgresql/hostchooser/HostStatus.java
index d303e8d..28a0780 100644
--- a/pgjdbc/src/main/java/org/postgresql/hostchooser/HostStatus.java
+++ b/pgjdbc/src/main/java/org/postgresql/hostchooser/HostStatus.java
@@ -9,8 +9,8 @@ package org.postgresql.hostchooser;
* Known state of a server.
*/
public enum HostStatus {
- ConnectFail,
- ConnectOK,
- Primary,
- Secondary
+ ConnectFail,
+ ConnectOK,
+ Primary,
+ Secondary
}
diff --git a/pgjdbc/src/main/java/org/postgresql/hostchooser/MultiHostChooser.java b/pgjdbc/src/main/java/org/postgresql/hostchooser/MultiHostChooser.java
index 953417f..db2bb05 100644
--- a/pgjdbc/src/main/java/org/postgresql/hostchooser/MultiHostChooser.java
+++ b/pgjdbc/src/main/java/org/postgresql/hostchooser/MultiHostChooser.java
@@ -22,117 +22,117 @@ import java.util.Properties;
* HostChooser that keeps track of known host statuses.
*/
class MultiHostChooser implements HostChooser {
- private final HostSpec[] hostSpecs;
- private final HostRequirement targetServerType;
- private int hostRecheckTime;
- private boolean loadBalance;
+ private final HostSpec[] hostSpecs;
+ private final HostRequirement targetServerType;
+ private int hostRecheckTime;
+ private boolean loadBalance;
- MultiHostChooser(HostSpec[] hostSpecs, HostRequirement targetServerType,
- Properties info) {
- this.hostSpecs = hostSpecs;
- this.targetServerType = targetServerType;
- try {
- hostRecheckTime = PGProperty.HOST_RECHECK_SECONDS.getInt(info) * 1000;
- loadBalance = PGProperty.LOAD_BALANCE_HOSTS.getBoolean(info);
- } catch (PSQLException e) {
- throw new RuntimeException(e);
- }
- }
-
- @Override
- public Iterator<CandidateHost> iterator() {
- Iterator<CandidateHost> res = candidateIterator();
- if (!res.hasNext()) {
- // In case all the candidate hosts are unavailable or do not match, try all the hosts just in case
- List<HostSpec> allHosts = Arrays.asList(hostSpecs);
- if (loadBalance) {
- allHosts = new ArrayList<>(allHosts);
- shuffle(allHosts);
- }
- res = withReqStatus(targetServerType, allHosts).iterator();
- }
- return res;
- }
-
- private Iterator<CandidateHost> candidateIterator() {
- if ( targetServerType != HostRequirement.preferSecondary
- && targetServerType != HostRequirement.preferPrimary ) {
- return getCandidateHosts(targetServerType).iterator();
+ MultiHostChooser(HostSpec[] hostSpecs, HostRequirement targetServerType,
+ Properties info) {
+ this.hostSpecs = hostSpecs;
+ this.targetServerType = targetServerType;
+ try {
+ hostRecheckTime = PGProperty.HOST_RECHECK_SECONDS.getInt(info) * 1000;
+ loadBalance = PGProperty.LOAD_BALANCE_HOSTS.getBoolean(info);
+ } catch (PSQLException e) {
+ throw new RuntimeException(e);
+ }
}
- HostRequirement preferredServerType =
- targetServerType == HostRequirement.preferSecondary
- ? HostRequirement.secondary
- : HostRequirement.primary;
-
- // preferSecondary tries to find secondary hosts first
- // Note: sort does not work here since there are "unknown" hosts,
- // and that "unknown" might turn out to be master, so we should discard that
- // if other secondaries exist
- // Same logic as the above works for preferPrimary if we replace "secondary"
- // with "primary" and vice versa
- List<CandidateHost> preferred = getCandidateHosts(preferredServerType);
- List<CandidateHost> any = getCandidateHosts(HostRequirement.any);
-
- if ( !preferred.isEmpty() && !any.isEmpty()
- && preferred.get(preferred.size() - 1).hostSpec.equals(any.get(0).hostSpec)) {
- // When the last preferred host's hostspec is the same as the first in "any" list, there's no need
- // to attempt to connect it as "preferred"
- // Note: this is only an optimization
- preferred = rtrim(1, preferred);
+ @Override
+ public Iterator iterator() {
+ Iterator res = candidateIterator();
+ if (!res.hasNext()) {
+ // In case all the candidate hosts are unavailable or do not match, try all the hosts just in case
+ List<HostSpec> allHosts = Arrays.asList(hostSpecs);
+ if (loadBalance) {
+ allHosts = new ArrayList<>(allHosts);
+ shuffle(allHosts);
+ }
+ res = withReqStatus(targetServerType, allHosts).iterator();
+ }
+ return res;
}
- return append(preferred, any).iterator();
- }
- private List<CandidateHost> getCandidateHosts(HostRequirement hostRequirement) {
- List<HostSpec> candidates =
- GlobalHostStatusTracker.getCandidateHosts(hostSpecs, hostRequirement, hostRecheckTime);
- if (loadBalance) {
- shuffle(candidates);
+ private Iterator<CandidateHost> candidateIterator() {
+ if (targetServerType != HostRequirement.preferSecondary
+ && targetServerType != HostRequirement.preferPrimary) {
+ return getCandidateHosts(targetServerType).iterator();
+ }
+
+ HostRequirement preferredServerType =
+ targetServerType == HostRequirement.preferSecondary
+ ? HostRequirement.secondary
+ : HostRequirement.primary;
+
+ // preferSecondary tries to find secondary hosts first
+ // Note: sort does not work here since there are "unknown" hosts,
+ // and that "unknown" might turn out to be master, so we should discard that
+ // if other secondaries exist
+ // Same logic as the above works for preferPrimary if we replace "secondary"
+ // with "primary" and vice versa
+ List<CandidateHost> preferred = getCandidateHosts(preferredServerType);
+ List<CandidateHost> any = getCandidateHosts(HostRequirement.any);
+
+ if (!preferred.isEmpty() && !any.isEmpty()
+ && preferred.get(preferred.size() - 1).hostSpec.equals(any.get(0).hostSpec)) {
+ // When the last preferred host's hostspec is the same as the first in "any" list, there's no need
+ // to attempt to connect it as "preferred"
+ // Note: this is only an optimization
+ preferred = rtrim(1, preferred);
+ }
+ return append(preferred, any).iterator();
}
- return withReqStatus(hostRequirement, candidates);
- }
- private List<CandidateHost> withReqStatus(final HostRequirement requirement, final List<HostSpec> hosts) {
- return new AbstractList<CandidateHost>() {
- @Override
- public CandidateHost get(int index) {
- return new CandidateHost(hosts.get(index), requirement);
- }
+ private List<CandidateHost> getCandidateHosts(HostRequirement hostRequirement) {
+ List<HostSpec> candidates =
+ GlobalHostStatusTracker.getCandidateHosts(hostSpecs, hostRequirement, hostRecheckTime);
+ if (loadBalance) {
+ shuffle(candidates);
+ }
+ return withReqStatus(hostRequirement, candidates);
+ }
- @Override
- public int size() {
- return hosts.size();
- }
- };
- }
+ private List<CandidateHost> withReqStatus(final HostRequirement requirement, final List<HostSpec> hosts) {
+ return new AbstractList<CandidateHost>() {
+ @Override
+ public CandidateHost get(int index) {
+ return new CandidateHost(hosts.get(index), requirement);
+ }
- private <T> List<T> append(final List<T> a, final List<T> b) {
- return new AbstractList<T>() {
- @Override
- public T get(int index) {
- return index < a.size() ? a.get(index) : b.get(index - a.size());
- }
+ @Override
+ public int size() {
+ return hosts.size();
+ }
+ };
+ }
- @Override
- public int size() {
- return a.size() + b.size();
- }
- };
- }
+ private <T> List<T> append(final List<T> a, final List<T> b) {
+ return new AbstractList<T>() {
+ @Override
+ public T get(int index) {
+ return index < a.size() ? a.get(index) : b.get(index - a.size());
+ }
- private <T> List<T> rtrim(final int size, final List<T> a) {
- return new AbstractList<T>() {
- @Override
- public T get(int index) {
- return a.get(index);
- }
+ @Override
+ public int size() {
+ return a.size() + b.size();
+ }
+ };
+ }
- @Override
- public int size() {
- return Math.max(0, a.size() - size);
- }
- };
- }
+ private <T> List<T> rtrim(final int size, final List<T> a) {
+ return new AbstractList<T>() {
+ @Override
+ public T get(int index) {
+ return a.get(index);
+ }
+
+ @Override
+ public int size() {
+ return Math.max(0, a.size() - size);
+ }
+ };
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/hostchooser/SingleHostChooser.java b/pgjdbc/src/main/java/org/postgresql/hostchooser/SingleHostChooser.java
index e79e834..d80245b 100644
--- a/pgjdbc/src/main/java/org/postgresql/hostchooser/SingleHostChooser.java
+++ b/pgjdbc/src/main/java/org/postgresql/hostchooser/SingleHostChooser.java
@@ -15,14 +15,14 @@ import java.util.Iterator;
* Host chooser that returns the single host.
*/
class SingleHostChooser implements HostChooser {
- private final Collection<CandidateHost> candidateHost;
+ private final Collection<CandidateHost> candidateHost;
- SingleHostChooser(HostSpec hostSpec, HostRequirement targetServerType) {
- this.candidateHost = Collections.singletonList(new CandidateHost(hostSpec, targetServerType));
- }
+ SingleHostChooser(HostSpec hostSpec, HostRequirement targetServerType) {
+ this.candidateHost = Collections.singletonList(new CandidateHost(hostSpec, targetServerType));
+ }
- @Override
- public Iterator<CandidateHost> iterator() {
- return candidateHost.iterator();
- }
+ @Override
+ public Iterator<CandidateHost> iterator() {
+ return candidateHost.iterator();
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/AbstractBlobClob.java b/pgjdbc/src/main/java/org/postgresql/jdbc/AbstractBlobClob.java
index d50ebf4..bc0d762 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/AbstractBlobClob.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/AbstractBlobClob.java
@@ -26,269 +26,266 @@ import java.util.ArrayList;
*/
@SuppressWarnings("try")
public abstract class AbstractBlobClob {
- protected BaseConnection conn;
+ protected final ResourceLock lock = new ResourceLock();
+ private final boolean support64bit;
+ private final long oid;
+ protected BaseConnection conn;
+ private LargeObject currentLo;
+ private boolean currentLoIsWriteable;
+ /**
+ * We create separate LargeObjects for methods that use streams so they won't interfere with each
+ * other.
+ */
+ private ArrayList<LargeObject> subLOs = new ArrayList<LargeObject>();
- private LargeObject currentLo;
- private boolean currentLoIsWriteable;
- private final boolean support64bit;
+ public AbstractBlobClob(BaseConnection conn, long oid) throws SQLException {
+ this.conn = conn;
+ this.oid = oid;
+ this.currentLoIsWriteable = false;
- /**
- * We create separate LargeObjects for methods that use streams so they won't interfere with each
- * other.
- */
- private ArrayList<LargeObject> subLOs = new ArrayList<LargeObject>();
+ support64bit = conn.haveMinimumServerVersion(90300);
+ }
- protected final ResourceLock lock = new ResourceLock();
- private final long oid;
-
- public AbstractBlobClob(BaseConnection conn, long oid) throws SQLException {
- this.conn = conn;
- this.oid = oid;
- this.currentLoIsWriteable = false;
-
- support64bit = conn.haveMinimumServerVersion(90300);
- }
-
- public void free() throws SQLException {
- try (ResourceLock ignore = lock.obtain()) {
- if (currentLo != null) {
- currentLo.close();
- currentLo = null;
- currentLoIsWriteable = false;
- }
- if (subLOs != null) {
- for (LargeObject subLO : subLOs) {
- subLO.close();
+ public void free() throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ if (currentLo != null) {
+ currentLo.close();
+ currentLo = null;
+ currentLoIsWriteable = false;
+ }
+ if (subLOs != null) {
+ for (LargeObject subLO : subLOs) {
+ subLO.close();
+ }
+ }
+ subLOs = null;
}
- }
- subLOs = null;
}
- }
- /**
- * For Blobs this should be in bytes while for Clobs it should be in characters. Since we really
- * haven't figured out how to handle character sets for Clobs the current implementation uses
- * bytes for both Blobs and Clobs.
- *
- * @param len maximum length
- * @throws SQLException if operation fails
- */
- public void truncate(long len) throws SQLException {
- try (ResourceLock ignore = lock.obtain()) {
- checkFreed();
- if (!conn.haveMinimumServerVersion(ServerVersion.v8_3)) {
- throw new PSQLException(
- GT.tr("Truncation of large objects is only implemented in 8.3 and later servers."),
- PSQLState.NOT_IMPLEMENTED);
- }
+ /**
+ * For Blobs this should be in bytes while for Clobs it should be in characters. Since we really
+ * haven't figured out how to handle character sets for Clobs the current implementation uses
+ * bytes for both Blobs and Clobs.
+ *
+ * @param len maximum length
+ * @throws SQLException if operation fails
+ */
+ public void truncate(long len) throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ checkFreed();
+ if (!conn.haveMinimumServerVersion(ServerVersion.v8_3)) {
+ throw new PSQLException(
+ GT.tr("Truncation of large objects is only implemented in 8.3 and later servers."),
+ PSQLState.NOT_IMPLEMENTED);
+ }
- if (len < 0) {
- throw new PSQLException(GT.tr("Cannot truncate LOB to a negative length."),
- PSQLState.INVALID_PARAMETER_VALUE);
- }
- if (len > Integer.MAX_VALUE) {
- if (support64bit) {
- getLo(true).truncate64(len);
- } else {
- throw new PSQLException(GT.tr("PostgreSQL LOBs can only index to: {0}", Integer.MAX_VALUE),
- PSQLState.INVALID_PARAMETER_VALUE);
+ if (len < 0) {
+ throw new PSQLException(GT.tr("Cannot truncate LOB to a negative length."),
+ PSQLState.INVALID_PARAMETER_VALUE);
+ }
+ if (len > Integer.MAX_VALUE) {
+ if (support64bit) {
+ getLo(true).truncate64(len);
+ } else {
+ throw new PSQLException(GT.tr("PostgreSQL LOBs can only index to: {0}", Integer.MAX_VALUE),
+ PSQLState.INVALID_PARAMETER_VALUE);
+ }
+ } else {
+ getLo(true).truncate((int) len);
+ }
}
- } else {
- getLo(true).truncate((int) len);
- }
}
- }
- public long length() throws SQLException {
- try (ResourceLock ignore = lock.obtain()) {
- checkFreed();
- if (support64bit) {
- return getLo(false).size64();
- } else {
- return getLo(false).size();
- }
- }
- }
-
- public byte[] getBytes(long pos, int length) throws SQLException {
- try (ResourceLock ignore = lock.obtain()) {
- assertPosition(pos);
- getLo(false).seek((int) (pos - 1), LargeObject.SEEK_SET);
- return getLo(false).read(length);
- }
- }
-
- public InputStream getBinaryStream() throws SQLException {
- try (ResourceLock ignore = lock.obtain()) {
- checkFreed();
- LargeObject subLO = getLo(false).copy();
- addSubLO(subLO);
- subLO.seek(0, LargeObject.SEEK_SET);
- return subLO.getInputStream();
- }
- }
-
- public OutputStream setBinaryStream(long pos) throws SQLException {
- try (ResourceLock ignore = lock.obtain()) {
- assertPosition(pos);
- LargeObject subLO = getLo(true).copy();
- addSubLO(subLO);
- subLO.seek((int) (pos - 1));
- return subLO.getOutputStream();
- }
- }
-
- /**
- * Iterate over the buffer looking for the specified pattern.
- *
- * @param pattern A pattern of bytes to search the blob for
- * @param start The position to start reading from
- * @return position of the specified pattern
- * @throws SQLException if something wrong happens
- */
- public long position(byte[] pattern, long start) throws SQLException {
- try (ResourceLock ignore = lock.obtain()) {
- assertPosition(start, pattern.length);
-
- int position = 1;
- int patternIdx = 0;
- long result = -1;
- int tmpPosition = 1;
-
- for (LOIterator i = new LOIterator(start - 1); i.hasNext(); position++) {
- byte b = i.next();
- if (b == pattern[patternIdx]) {
- if (patternIdx == 0) {
- tmpPosition = position;
- }
- patternIdx++;
- if (patternIdx == pattern.length) {
- result = tmpPosition;
- break;
- }
- } else {
- patternIdx = 0;
+ public long length() throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ checkFreed();
+ if (support64bit) {
+ return getLo(false).size64();
+ } else {
+ return getLo(false).size();
+ }
}
- }
-
- return result;
- }
- }
-
- /**
- * Iterates over a large object returning byte values. Will buffer the data from the large object.
- */
- private class LOIterator {
- private static final int BUFFER_SIZE = 8096;
- private final byte[] buffer = new byte[BUFFER_SIZE];
- private int idx = BUFFER_SIZE;
- private int numBytes = BUFFER_SIZE;
-
- LOIterator(long start) throws SQLException {
- getLo(false).seek((int) start);
}
- public boolean hasNext() throws SQLException {
- boolean result;
- if (idx < numBytes) {
- result = true;
- } else {
- numBytes = getLo(false).read(buffer, 0, BUFFER_SIZE);
- idx = 0;
- result = numBytes > 0;
- }
- return result;
+ public byte[] getBytes(long pos, int length) throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ assertPosition(pos);
+ getLo(false).seek((int) (pos - 1), LargeObject.SEEK_SET);
+ return getLo(false).read(length);
+ }
}
- private byte next() {
- return buffer[idx++];
+ public InputStream getBinaryStream() throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ checkFreed();
+ LargeObject subLO = getLo(false).copy();
+ addSubLO(subLO);
+ subLO.seek(0, LargeObject.SEEK_SET);
+ return subLO.getInputStream();
+ }
}
- }
- /**
- * This is simply passing the byte value of the pattern Blob.
- *
- * @param pattern search pattern
- * @param start start position
- * @return position of given pattern
- * @throws SQLException if something goes wrong
- */
- public long position(Blob pattern, long start) throws SQLException {
- return position(pattern.getBytes(1, (int) pattern.length()), start);
- }
-
- /**
- * Throws an exception if the pos value exceeds the max value by which the large object API can
- * index.
- *
- * @param pos Position to write at.
- * @throws SQLException if something goes wrong
- */
- protected void assertPosition(long pos) throws SQLException {
- assertPosition(pos, 0);
- }
-
- /**
- * Throws an exception if the pos value exceeds the max value by which the large object API can
- * index.
- *
- * @param pos Position to write at.
- * @param len number of bytes to write.
- * @throws SQLException if something goes wrong
- */
- protected void assertPosition(long pos, long len) throws SQLException {
- checkFreed();
- if (pos < 1) {
- throw new PSQLException(GT.tr("LOB positioning offsets start at 1."),
- PSQLState.INVALID_PARAMETER_VALUE);
+ public OutputStream setBinaryStream(long pos) throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ assertPosition(pos);
+ LargeObject subLO = getLo(true).copy();
+ addSubLO(subLO);
+ subLO.seek((int) (pos - 1));
+ return subLO.getOutputStream();
+ }
}
- if (pos + len - 1 > Integer.MAX_VALUE) {
- throw new PSQLException(GT.tr("PostgreSQL LOBs can only index to: {0}", Integer.MAX_VALUE),
- PSQLState.INVALID_PARAMETER_VALUE);
+
+ /**
+ * Iterate over the buffer looking for the specified pattern.
+ *
+ * @param pattern A pattern of bytes to search the blob for
+ * @param start The position to start reading from
+ * @return position of the specified pattern
+ * @throws SQLException if something wrong happens
+ */
+ public long position(byte[] pattern, long start) throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ assertPosition(start, pattern.length);
+
+ int position = 1;
+ int patternIdx = 0;
+ long result = -1;
+ int tmpPosition = 1;
+
+ for (LOIterator i = new LOIterator(start - 1); i.hasNext(); position++) {
+ byte b = i.next();
+ if (b == pattern[patternIdx]) {
+ if (patternIdx == 0) {
+ tmpPosition = position;
+ }
+ patternIdx++;
+ if (patternIdx == pattern.length) {
+ result = tmpPosition;
+ break;
+ }
+ } else {
+ patternIdx = 0;
+ }
+ }
+
+ return result;
+ }
}
- }
- /**
- * Checks that this LOB hasn't been free()d already.
- *
- * @throws SQLException if LOB has been freed.
- */
- protected void checkFreed() throws SQLException {
- if (subLOs == null) {
- throw new PSQLException(GT.tr("free() was called on this LOB previously"),
- PSQLState.OBJECT_NOT_IN_STATE);
+ /**
+ * This is simply passing the byte value of the pattern Blob.
+ *
+ * @param pattern search pattern
+ * @param start start position
+ * @return position of given pattern
+ * @throws SQLException if something goes wrong
+ */
+ public long position(Blob pattern, long start) throws SQLException {
+ return position(pattern.getBytes(1, (int) pattern.length()), start);
}
- }
- protected LargeObject getLo(boolean forWrite) throws SQLException {
- try (ResourceLock ignore = lock.obtain()) {
- LargeObject currentLo = this.currentLo;
- if (currentLo != null) {
- if (forWrite && !currentLoIsWriteable) {
- // Reopen the stream in read-write, at the same pos.
- int currentPos = currentLo.tell();
+ /**
+ * Throws an exception if the pos value exceeds the max value by which the large object API can
+ * index.
+ *
+ * @param pos Position to write at.
+ * @throws SQLException if something goes wrong
+ */
+ protected void assertPosition(long pos) throws SQLException {
+ assertPosition(pos, 0);
+ }
- LargeObjectManager lom = conn.getLargeObjectAPI();
- LargeObject newLo = lom.open(oid, LargeObjectManager.READWRITE);
- subLOs.add(currentLo);
- this.currentLo = currentLo = newLo;
+ /**
+ * Throws an exception if the pos value exceeds the max value by which the large object API can
+ * index.
+ *
+ * @param pos Position to write at.
+ * @param len number of bytes to write.
+ * @throws SQLException if something goes wrong
+ */
+ protected void assertPosition(long pos, long len) throws SQLException {
+ checkFreed();
+ if (pos < 1) {
+ throw new PSQLException(GT.tr("LOB positioning offsets start at 1."),
+ PSQLState.INVALID_PARAMETER_VALUE);
+ }
+ if (pos + len - 1 > Integer.MAX_VALUE) {
+ throw new PSQLException(GT.tr("PostgreSQL LOBs can only index to: {0}", Integer.MAX_VALUE),
+ PSQLState.INVALID_PARAMETER_VALUE);
+ }
+ }
- if (currentPos != 0) {
- currentLo.seek(currentPos);
- }
+ /**
+ * Checks that this LOB hasn't been free()d already.
+ *
+ * @throws SQLException if LOB has been freed.
+ */
+ protected void checkFreed() throws SQLException {
+ if (subLOs == null) {
+ throw new PSQLException(GT.tr("free() was called on this LOB previously"),
+ PSQLState.OBJECT_NOT_IN_STATE);
+ }
+ }
+
+ protected LargeObject getLo(boolean forWrite) throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ LargeObject currentLo = this.currentLo;
+ if (currentLo != null) {
+ if (forWrite && !currentLoIsWriteable) {
+ // Reopen the stream in read-write, at the same pos.
+ int currentPos = currentLo.tell();
+
+ LargeObjectManager lom = conn.getLargeObjectAPI();
+ LargeObject newLo = lom.open(oid, LargeObjectManager.READWRITE);
+ subLOs.add(currentLo);
+ this.currentLo = currentLo = newLo;
+
+ if (currentPos != 0) {
+ currentLo.seek(currentPos);
+ }
+ }
+
+ return currentLo;
+ }
+ LargeObjectManager lom = conn.getLargeObjectAPI();
+ this.currentLo = currentLo =
+ lom.open(oid, forWrite ? LargeObjectManager.READWRITE : LargeObjectManager.READ);
+ currentLoIsWriteable = forWrite;
+ return currentLo;
+ }
+ }
+
+ protected void addSubLO(LargeObject subLO) {
+ subLOs.add(subLO);
+ }
+
+ /**
+ * Iterates over a large object returning byte values. Will buffer the data from the large object.
+ */
+ private class LOIterator {
+ private static final int BUFFER_SIZE = 8096;
+ private final byte[] buffer = new byte[BUFFER_SIZE];
+ private int idx = BUFFER_SIZE;
+ private int numBytes = BUFFER_SIZE;
+
+ LOIterator(long start) throws SQLException {
+ getLo(false).seek((int) start);
}
- return currentLo;
- }
- LargeObjectManager lom = conn.getLargeObjectAPI();
- this.currentLo = currentLo =
- lom.open(oid, forWrite ? LargeObjectManager.READWRITE : LargeObjectManager.READ);
- currentLoIsWriteable = forWrite;
- return currentLo;
- }
- }
+ public boolean hasNext() throws SQLException {
+ boolean result;
+ if (idx < numBytes) {
+ result = true;
+ } else {
+ numBytes = getLo(false).read(buffer, 0, BUFFER_SIZE);
+ idx = 0;
+ result = numBytes > 0;
+ }
+ return result;
+ }
- protected void addSubLO(LargeObject subLO) {
- subLOs.add(subLO);
- }
+ private byte next() {
+ return buffer[idx++];
+ }
+ }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/ArrayDecoding.java b/pgjdbc/src/main/java/org/postgresql/jdbc/ArrayDecoding.java
index 9bee44d..475ddde 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/ArrayDecoding.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/ArrayDecoding.java
@@ -44,761 +44,732 @@ import java.util.Map;
*/
public final class ArrayDecoding {
- public ArrayDecoding() {
- }
+ private static final ArrayDecoder LONG_OBJ_ARRAY = new AbstractObjectArrayDecoder(Long.class) {
- /**
- * Array list implementation specific for storing PG array elements. If
- * {@link PgArrayList#dimensionsCount} is {@code 1}, the contents will be
- * {@link String}. For all larger dimensionsCount, the values will be
- * {@link PgArrayList} instances.
- */
- @SuppressWarnings("serial")
- public static final class PgArrayList extends ArrayList {
-
- /**
- * How many dimensions.
- */
- int dimensionsCount = 1;
-
- public PgArrayList() {
- }
-
- }
-
- private interface ArrayDecoder {
-
- A createArray(int size);
-
- Object[] createMultiDimensionalArray(int[] sizes);
-
- boolean supportBinary();
-
- void populateFromBinary(A array, int index, int count, ByteBuffer bytes, BaseConnection connection)
- throws SQLException;
-
- void populateFromString(A array, List strings, BaseConnection connection) throws SQLException;
- }
-
- private abstract static class AbstractObjectStringArrayDecoder implements ArrayDecoder {
- final Class> baseClazz;
-
- AbstractObjectStringArrayDecoder(Class> baseClazz) {
- this.baseClazz = baseClazz;
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public boolean supportBinary() {
- return false;
- }
-
- @SuppressWarnings("unchecked")
- @Override
- public A createArray(int size) {
- return (A) Array.newInstance(baseClazz, size);
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public Object[] createMultiDimensionalArray(int[] sizes) {
- return (Object[]) Array.newInstance(baseClazz, sizes);
- }
-
- @Override
- public void populateFromBinary(A arr, int index, int count, ByteBuffer bytes, BaseConnection connection)
- throws SQLException {
- throw new SQLFeatureNotSupportedException();
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void populateFromString(A arr, List strings, BaseConnection connection) throws SQLException {
- final Object[] array = (Object[]) arr;
-
- for (int i = 0, j = strings.size(); i < j; i++) {
- final String stringVal = strings.get(i);
- array[i] = stringVal != null ? parseValue(stringVal, connection) : null;
- }
- }
-
- abstract Object parseValue(String stringVal, BaseConnection connection) throws SQLException;
- }
-
- private abstract static class AbstractObjectArrayDecoder extends AbstractObjectStringArrayDecoder {
-
- AbstractObjectArrayDecoder(Class> baseClazz) {
- super(baseClazz);
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public boolean supportBinary() {
- return true;
- }
-
- @Override
- public void populateFromBinary(A arr, int index, int count, ByteBuffer bytes, BaseConnection connection)
- throws SQLException {
- final Object[] array = (Object[]) arr;
-
- // skip through to the requested index
- for (int i = 0; i < index; i++) {
- final int length = bytes.getInt();
- if (length > 0) {
- bytes.position(bytes.position() + length);
+ @Override
+ Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) {
+ return bytes.getLong();
}
- }
- for (int i = 0; i < count; i++) {
- final int length = bytes.getInt();
- if (length != -1) {
- array[i] = parseValue(length, bytes, connection);
- } else {
- // explicitly set to null for reader's clarity
- array[i] = null;
+ @Override
+ Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
+ return PgResultSet.toLong(stringVal);
}
- }
- }
+ };
+ private static final ArrayDecoder INT4_UNSIGNED_OBJ_ARRAY = new AbstractObjectArrayDecoder(
+ Long.class) {
- abstract Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) throws SQLException;
- }
-
- private static final ArrayDecoder LONG_OBJ_ARRAY = new AbstractObjectArrayDecoder(Long.class) {
-
- @Override
- Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) {
- return bytes.getLong();
- }
-
- @Override
- Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
- return PgResultSet.toLong(stringVal);
- }
- };
-
- private static final ArrayDecoder INT4_UNSIGNED_OBJ_ARRAY = new AbstractObjectArrayDecoder(
- Long.class) {
-
- @Override
- Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) {
- return bytes.getInt() & 0xFFFFFFFFL;
- }
-
- @Override
- Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
- return PgResultSet.toLong(stringVal);
- }
- };
-
- private static final ArrayDecoder INTEGER_OBJ_ARRAY = new AbstractObjectArrayDecoder(
- Integer.class) {
-
- @Override
- Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) {
- return bytes.getInt();
- }
-
- @Override
- Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
- return PgResultSet.toInt(stringVal);
- }
- };
-
- private static final ArrayDecoder SHORT_OBJ_ARRAY = new AbstractObjectArrayDecoder(Short.class) {
-
- @Override
- Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) {
- return bytes.getShort();
- }
-
- @Override
- Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
- return PgResultSet.toShort(stringVal);
- }
- };
-
- private static final ArrayDecoder DOUBLE_OBJ_ARRAY = new AbstractObjectArrayDecoder(
- Double.class) {
-
- @Override
- Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) {
- return bytes.getDouble();
- }
-
- @Override
- Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
- return PgResultSet.toDouble(stringVal);
- }
- };
-
- private static final ArrayDecoder FLOAT_OBJ_ARRAY = new AbstractObjectArrayDecoder(Float.class) {
-
- @Override
- Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) {
- return bytes.getFloat();
- }
-
- @Override
- Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
- return PgResultSet.toFloat(stringVal);
- }
- };
-
- private static final ArrayDecoder BOOLEAN_OBJ_ARRAY = new AbstractObjectArrayDecoder(
- Boolean.class) {
-
- @Override
- Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) {
- return bytes.get() == 1;
- }
-
- @Override
- Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
- return BooleanTypeUtil.fromString(stringVal);
- }
- };
-
- private static final ArrayDecoder STRING_ARRAY = new AbstractObjectArrayDecoder<>(String.class) {
-
- @Override
- Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) throws SQLException {
- assert bytes.hasArray();
- final byte[] byteArray = bytes.array();
- final int offset = bytes.arrayOffset() + bytes.position();
-
- String val;
- try {
- val = connection.getEncoding().decode(byteArray, offset, length);
- } catch (IOException e) {
- throw new PSQLException(GT.tr(
- "Invalid character data was found. This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. The most common example of this is storing 8bit data in a SQL_ASCII database."),
- PSQLState.DATA_ERROR, e);
- }
- bytes.position(bytes.position() + length);
- return val;
- }
-
- @Override
- Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
- return stringVal;
- }
- };
-
- private static final ArrayDecoder BYTE_ARRAY_ARRAY = new AbstractObjectArrayDecoder(
- byte[].class) {
-
- /**
- * {@inheritDoc}
- */
- @Override
- Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) throws SQLException {
- final byte[] array = new byte[length];
- bytes.get(array);
- return array;
- }
-
- @Override
- Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
- return PGbytea.toBytes(stringVal.getBytes(StandardCharsets.US_ASCII));
- }
- };
-
- private static final ArrayDecoder BIG_DECIMAL_STRING_DECODER = new AbstractObjectStringArrayDecoder(
- BigDecimal.class) {
-
- @Override
- Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
- return PgResultSet.toBigDecimal(stringVal);
- }
- };
-
- private static final ArrayDecoder STRING_ONLY_DECODER = new AbstractObjectStringArrayDecoder(
- String.class) {
-
- @Override
- Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
- return stringVal;
- }
- };
-
- private static final ArrayDecoder DATE_DECODER = new AbstractObjectStringArrayDecoder(
- Date.class) {
-
- @SuppressWarnings("deprecation")
- @Override
- Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
- return connection.getTimestampUtils().toDate(null, stringVal);
- }
- };
-
- private static final ArrayDecoder