add testcontainers

Jörg Prante 2024-07-08 17:34:38 +02:00
parent 698d755934
commit ae8f10b9cf
614 changed files with 100376 additions and 98526 deletions

NOTICE.txt (normal file, 8 changed lines)

@ -0,0 +1,8 @@
This work integrates
pgjdbc - https://github.com/pgjdbc/pgjdbc (as of 1 Apr 2024)
stringprep - Stringprep (RFC 3454) Java implementation https://github.com/ongres/stringprep (as of 1 Apr 2024)
saslprep - a profile of stringprep
scram-client - SCRAM (RFC 5802) Java implementation https://github.com/ongres/scram (as of 1 Apr 2024)
All of those projects were modified for Java 21+ with JPMS module info.


@ -1,4 +1,3 @@
group = org.xbib.jdbc
name = pgjdbc
version = 42.7.4.0


@ -8,8 +8,8 @@ dependencies {
test {
useJUnitPlatform()
failFast = true
systemProperty 'java.util.logging.config.file', 'src/test/resources/logging.properties'
failFast = false
//systemProperty 'java.util.logging.config.file', 'src/test/resources/logging.properties'
testLogging {
events 'STARTED', 'PASSED', 'FAILED', 'SKIPPED'
showStandardStreams = true

Binary file not shown.


@ -1,6 +1,6 @@
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-8.5-all.zip
distributionUrl=https\://services.gradle.org/distributions/gradle-8.7-bin.zip
networkTimeout=10000
validateDistributionUrl=true
zipStoreBase=GRADLE_USER_HOME

gradlew.bat (vendored, 20 changed lines)

@ -43,11 +43,11 @@ set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if %ERRORLEVEL% equ 0 goto execute
echo.
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
echo. 1>&2
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 1>&2
echo. 1>&2
echo Please set the JAVA_HOME variable in your environment to match the 1>&2
echo location of your Java installation. 1>&2
goto fail
@ -57,11 +57,11 @@ set JAVA_EXE=%JAVA_HOME%/bin/java.exe
if exist "%JAVA_EXE%" goto execute
echo.
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
echo. 1>&2
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 1>&2
echo. 1>&2
echo Please set the JAVA_HOME variable in your environment to match the 1>&2
echo location of your Java installation. 1>&2
goto fail


@ -1,3 +1,30 @@
dependencies {
api project(':scram-client')
testImplementation testLibs.junit.runner
testImplementation testLibs.junit.jupiter.engine
testImplementation testLibs.bytebuddy
testImplementation testLibs.bytebuddy.agent
testImplementation testLibs.classloader.leak.test
testImplementation testLibs.testcontainers
testImplementation testLibs.testcontainers.junit.jupiter
testImplementation testLibs.testcontainers.postgresql
}
test {
systemProperty 'username', 'test'
systemProperty 'server', 'localhost'
systemProperty 'port', '5432'
systemProperty 'secondaryServer1', 'localhost'
systemProperty 'secondaryPort1', '5433'
systemProperty 'secondaryServer2', 'localhost'
systemProperty 'secondaryPort2', '5434'
systemProperty 'database', 'test'
systemProperty 'username', 'test'
systemProperty 'password', 'test'
systemProperty 'privilegedUser', 'postgres'
systemProperty 'privilegedPassword', ''
systemProperty 'sspiusername', 'testsspi'
systemProperty 'preparethreshold', '5'
systemProperty 'protocolVersion', '0'
systemProperty 'sslpassword', 'sslpwd'
}
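For orientation, the following is a minimal sketch of the kind of JUnit 5 test that the newly added testcontainers, testcontainers-junit-jupiter and testcontainers-postgresql dependencies enable. The class name and the postgres:16-alpine image tag are illustrative only, and the database/username/password values simply mirror the system properties configured above.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

import org.junit.jupiter.api.Test;
import org.testcontainers.containers.PostgreSQLContainer;
import org.testcontainers.junit.jupiter.Container;
import org.testcontainers.junit.jupiter.Testcontainers;

import static org.junit.jupiter.api.Assertions.assertEquals;

@Testcontainers
class PostgresContainerSmokeTest {

    // One throwaway PostgreSQL container shared by the tests in this class;
    // database name and credentials mirror the build.gradle system properties.
    @Container
    static final PostgreSQLContainer<?> POSTGRES =
            new PostgreSQLContainer<>("postgres:16-alpine")
                    .withDatabaseName("test")
                    .withUsername("test")
                    .withPassword("test");

    @Test
    void runsTrivialQueryAgainstContainer() throws Exception {
        try (Connection connection = DriverManager.getConnection(
                POSTGRES.getJdbcUrl(), POSTGRES.getUsername(), POSTGRES.getPassword());
             Statement statement = connection.createStatement();
             ResultSet resultSet = statement.executeQuery("SELECT 1")) {
            resultSet.next();
            assertEquals(1, resultSet.getInt(1));
        }
    }
}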


@ -59,10 +59,10 @@ import java.util.logging.Logger;
@SuppressWarnings("try")
public class Driver implements java.sql.Driver {
private static Driver registeredDriver;
private static final Logger PARENT_LOGGER = Logger.getLogger("org.postgresql");
private static final Logger LOGGER = Logger.getLogger("org.postgresql.Driver");
private static final SharedTimer SHARED_TIMER = new SharedTimer();
private static Driver registeredDriver;
static {
try {
@ -76,53 +76,14 @@ public class Driver implements java.sql.Driver {
}
}
private final ResourceLock lock = new ResourceLock();
// Helper to retrieve default properties from classloader resource
// properties files.
private Properties defaultProperties;
private final ResourceLock lock = new ResourceLock();
public Driver() {
}
private Properties getDefaultProperties() throws IOException {
try (ResourceLock ignore = lock.obtain()) {
if (defaultProperties != null) {
return defaultProperties;
}
// Make sure we load properties with the maximum possible privileges.
try {
defaultProperties =
doPrivileged(new PrivilegedExceptionAction<Properties>() {
@Override
public Properties run() throws IOException {
return loadDefaultProperties();
}
});
} catch (PrivilegedActionException e) {
Exception ex = e.getException();
if (ex instanceof IOException) {
throw (IOException) ex;
}
throw new RuntimeException(e);
} catch (Throwable e) {
if (e instanceof IOException) {
throw (IOException) e;
}
if (e instanceof RuntimeException) {
throw (RuntimeException) e;
}
if (e instanceof Error) {
throw (Error) e;
}
throw new RuntimeException(e);
}
return defaultProperties;
}
}
@SuppressWarnings("unchecked")
private static <T> T doPrivileged(PrivilegedExceptionAction<T> action) throws Throwable {
try {
@ -137,302 +98,6 @@ public class Driver implements java.sql.Driver {
}
}
private Properties loadDefaultProperties() throws IOException {
Properties merged = new Properties();
try {
PGProperty.USER.set(merged, System.getProperty("user.name"));
} catch (SecurityException se) {
// We're just trying to set a default, so if we can't
// it's not a big deal.
}
// If we are loaded by the bootstrap classloader, getClassLoader()
// may return null. In that case, try to fall back to the system
// classloader.
//
// We should not need to catch SecurityException here as we are
// accessing either our own classloader, or the system classloader
// when our classloader is null. The ClassLoader javadoc claims
// neither case can throw SecurityException.
ClassLoader cl = getClass().getClassLoader();
if (cl == null) {
LOGGER.log(Level.FINE, "Can't find our classloader for the Driver; "
+ "attempt to use the system class loader");
cl = ClassLoader.getSystemClassLoader();
}
if (cl == null) {
LOGGER.log(Level.WARNING, "Can't find a classloader for the Driver; not loading driver "
+ "configuration from org/postgresql/driverconfig.properties");
return merged; // Give up on finding defaults.
}
LOGGER.log(Level.FINE, "Loading driver configuration via classloader {0}", cl);
// When loading the driver config files we don't want settings found
// in later files in the classpath to override settings specified in
// earlier files. To do this we've got to read the returned
// Enumeration into temporary storage.
ArrayList<URL> urls = new ArrayList<>();
Enumeration<URL> urlEnum = cl.getResources("org/postgresql/driverconfig.properties");
while (urlEnum.hasMoreElements()) {
urls.add(urlEnum.nextElement());
}
for (int i = urls.size() - 1; i >= 0; i--) {
URL url = urls.get(i);
LOGGER.log(Level.FINE, "Loading driver configuration from: {0}", url);
InputStream is = url.openStream();
merged.load(is);
is.close();
}
return merged;
}
/**
* <p>Try to make a database connection to the given URL. The driver should return "null" if it
* realizes it is the wrong kind of driver to connect to the given URL. This will be common, as
* when the JDBC driverManager is asked to connect to a given URL, it passes the URL to each
* loaded driver in turn.</p>
*
* <p>The driver should raise an SQLException if it is the right driver to connect to the given URL,
* but has trouble connecting to the database.</p>
*
* <p>The java.util.Properties argument can be used to pass arbitrary string tag/value pairs as
* connection arguments.</p>
*
* <ul>
* <li>user - (required) The user to connect as</li>
* <li>password - (optional) The password for the user</li>
* <li>ssl -(optional) Use SSL when connecting to the server</li>
* <li>readOnly - (optional) Set connection to read-only by default</li>
* <li>charSet - (optional) The character set to be used for converting to/from
* the database to unicode. If multibyte is enabled on the server then the character set of the
* database is used as the default, otherwise the jvm character encoding is used as the default.
* This value is only used when connecting to a 7.2 or older server.</li>
* <li>loglevel - (optional) Enable logging of messages from the driver. The value is an integer
* from 0 to 2 where: OFF = 0, INFO =1, DEBUG = 2 The output is sent to
* DriverManager.getPrintWriter() if set, otherwise it is sent to System.out.</li>
* <li>compatible - (optional) This is used to toggle between different functionality
* as it changes across different releases of the jdbc driver code. The values here are versions
* of the jdbc client and not server versions. For example in 7.1 get/setBytes worked on
* LargeObject values, in 7.2 these methods were changed to work on bytea values. This change in
* functionality could be disabled by setting the compatible level to be "7.1", in which case the
* driver will revert to the 7.1 functionality.</li>
* </ul>
*
* <p>Normally, at least "user" and "password" properties should be included in the properties. For a
* list of supported character encoding , see
* http://java.sun.com/products/jdk/1.2/docs/guide/internat/encoding.doc.html Note that you will
* probably want to have set up the Postgres database itself to use the same encoding, with the
* {@code -E <encoding>} argument to createdb.</p>
*
* <p>Our protocol takes the forms:</p>
*
* <pre>
* jdbc:postgresql://host:port/database?param1=val1&amp;...
* </pre>
*
* @param url the URL of the database to connect to
* @param info a list of arbitrary tag/value pairs as connection arguments
* @return a connection to the URL or null if it isnt us
* @exception SQLException if a database access error occurs or the url is
* {@code null}
* @see java.sql.Driver#connect
*/
@Override
public Connection connect(String url, Properties info) throws SQLException {
if (url == null) {
throw new SQLException("url is null");
}
// get defaults
Properties defaults;
if (!url.startsWith("jdbc:postgresql:")) {
return null;
}
try {
defaults = getDefaultProperties();
} catch (IOException ioe) {
throw new PSQLException(GT.tr("Error loading default settings from driverconfig.properties"),
PSQLState.UNEXPECTED_ERROR, ioe);
}
// override defaults with provided properties
Properties props = new Properties(defaults);
if (info != null) {
Set<String> e = info.stringPropertyNames();
for (String propName : e) {
String propValue = info.getProperty(propName);
if (propValue == null) {
throw new PSQLException(
GT.tr("Properties for the driver contains a non-string value for the key ")
+ propName,
PSQLState.UNEXPECTED_ERROR);
}
props.setProperty(propName, propValue);
}
}
// parse URL and add more properties
if ((props = parseURL(url, props)) == null) {
throw new PSQLException(
GT.tr("Unable to parse URL {0}", url),
PSQLState.UNEXPECTED_ERROR);
}
try {
LOGGER.log(Level.FINE, "Connecting with URL: {0}", url);
// Enforce login timeout, if specified, by running the connection
// attempt in a separate thread. If we hit the timeout without the
// connection completing, we abandon the connection attempt in
// the calling thread, but the separate thread will keep trying.
// Eventually, the separate thread will either fail or complete
// the connection; at that point we clean up the connection if
// we managed to establish one after all. See ConnectThread for
// more details.
long timeout = timeout(props);
if (timeout <= 0) {
return makeConnection(url, props);
}
ConnectThread ct = new ConnectThread(url, props);
Thread thread = new Thread(ct, "PostgreSQL JDBC driver connection thread");
thread.setDaemon(true); // Don't prevent the VM from shutting down
thread.start();
return ct.getResult(timeout);
} catch (PSQLException ex1) {
LOGGER.log(Level.FINE, "Connection error: ", ex1);
// re-throw the exception, otherwise it will be caught next, and a
// org.postgresql.unusual error will be returned instead.
throw ex1;
} catch (Exception ex2) {
if ("java.security.AccessControlException".equals(ex2.getClass().getName())) {
// java.security.AccessControlException has been deprecated for removal, so compare the class name
throw new PSQLException(
GT.tr(
"Your security policy has prevented the connection from being attempted. You probably need to grant the connect java.net.SocketPermission to the database server host and port that you wish to connect to."),
PSQLState.UNEXPECTED_ERROR, ex2);
}
LOGGER.log(Level.FINE, "Unexpected connection error: ", ex2);
throw new PSQLException(
GT.tr(
"Something unusual has occurred to cause the driver to fail. Please report this exception."),
PSQLState.UNEXPECTED_ERROR, ex2);
}
}
/**
* this is an empty method left here for graalvm
* we removed the ability to setup the logger from properties
* due to a security issue
* @param props Connection Properties
*/
private void setupLoggerFromProperties(final Properties props) {
}
/**
* Perform a connect in a separate thread; supports getting the results from the original thread
* while enforcing a login timeout.
*/
private static class ConnectThread implements Runnable {
private final ResourceLock lock = new ResourceLock();
private final Condition lockCondition = lock.newCondition();
ConnectThread(String url, Properties props) {
this.url = url;
this.props = props;
}
@Override
public void run() {
Connection conn;
Throwable error;
try {
conn = makeConnection(url, props);
error = null;
} catch (Throwable t) {
conn = null;
error = t;
}
try (ResourceLock ignore = lock.obtain()) {
if (abandoned) {
if (conn != null) {
try {
conn.close();
} catch (SQLException e) {
}
}
} else {
result = conn;
resultException = error;
lockCondition.signal();
}
}
}
/**
* Get the connection result from this (assumed running) thread. If the timeout is reached
* without a result being available, a SQLException is thrown.
*
* @param timeout timeout in milliseconds
* @return the new connection, if successful
* @throws SQLException if a connection error occurs or the timeout is reached
*/
public Connection getResult(long timeout) throws SQLException {
long expiry = TimeUnit.NANOSECONDS.toMillis(System.nanoTime()) + timeout;
try (ResourceLock ignore = lock.obtain()) {
while (true) {
if (result != null) {
return result;
}
Throwable resultException = this.resultException;
if (resultException != null) {
if (resultException instanceof SQLException) {
resultException.fillInStackTrace();
throw (SQLException) resultException;
} else {
throw new PSQLException(
GT.tr(
"Something unusual has occurred to cause the driver to fail. Please report this exception."),
PSQLState.UNEXPECTED_ERROR, resultException);
}
}
long delay = expiry - TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
if (delay <= 0) {
abandoned = true;
throw new PSQLException(GT.tr("Connection attempt timed out."),
PSQLState.CONNECTION_UNABLE_TO_CONNECT);
}
try {
lockCondition.await(delay, TimeUnit.MILLISECONDS);
} catch (InterruptedException ie) {
// reset the interrupt flag
Thread.currentThread().interrupt();
abandoned = true;
// throw an unchecked exception which will hopefully not be ignored by the calling code
throw new RuntimeException(GT.tr("Interrupted while attempting to connect."));
}
}
}
}
private final String url;
private final Properties props;
private Connection result;
private Throwable resultException;
private boolean abandoned;
}
/**
* Create a connection from URL and properties. Always does the connection work in the current
* thread without enforcing a timeout, regardless of any timeout specified in the properties.
@ -446,60 +111,6 @@ public class Driver implements java.sql.Driver {
return new PgConnection(hostSpecs(props), props, url);
}
/**
* Returns true if the driver thinks it can open a connection to the given URL. Typically, drivers
* will return true if they understand the subprotocol specified in the URL and false if they
* don't. Our protocols start with jdbc:postgresql:
*
* @param url the URL of the driver
* @return true if this driver accepts the given URL
* @see java.sql.Driver#acceptsURL
*/
@Override
public boolean acceptsURL(String url) {
return parseURL(url, null) != null;
}
/**
* <p>The getPropertyInfo method is intended to allow a generic GUI tool to discover what properties
* it should prompt a human for in order to get enough information to connect to a database.</p>
*
* <p>Note that depending on the values the human has supplied so far, additional values may become
* necessary, so it may be necessary to iterate through several calls to getPropertyInfo</p>
*
* @param url the Url of the database to connect to
* @param info a proposed list of tag/value pairs that will be sent on connect open.
* @return An array of DriverPropertyInfo objects describing possible properties. This array may
* be an empty array if no properties are required
* @see java.sql.Driver#getPropertyInfo
*/
@Override
public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) {
Properties copy = new Properties(info);
Properties parse = parseURL(url, copy);
if (parse != null) {
copy = parse;
}
PGProperty[] knownProperties = PGProperty.values();
DriverPropertyInfo[] props = new DriverPropertyInfo[knownProperties.length];
for (int i = 0; i < props.length; i++) {
props[i] = knownProperties[i].toDriverPropertyInfo(copy);
}
return props;
}
@Override
public int getMajorVersion() {
return DriverInfo.MAJOR_VERSION;
}
@Override
public int getMinorVersion() {
return DriverInfo.MINOR_VERSION;
}
/**
* Returns the server version series of this driver and the specific build number.
*
@ -511,18 +122,6 @@ public class Driver implements java.sql.Driver {
return DriverInfo.DRIVER_FULL_NAME;
}
/**
* <p>Report whether the driver is a genuine JDBC compliant driver. A driver may only report "true"
* here if it passes the JDBC compliance tests, otherwise it is required to return false. JDBC
* compliance requires full support for the JDBC API and full support for SQL 92 Entry Level.</p>
*
* <p>For PostgreSQL, this is not yet possible, as we are not SQL92 compliant (yet).</p>
*/
@Override
public boolean jdbcCompliant() {
return false;
}
/**
* Constructs a new DriverURL, splitting the specified URL into its component parts.
*
@ -743,11 +342,6 @@ public class Driver implements java.sql.Driver {
PSQLState.NOT_IMPLEMENTED.getState());
}
@Override
public Logger getParentLogger() {
return PARENT_LOGGER;
}
public static SharedTimer getSharedTimer() {
return SHARED_TIMER;
}
@ -793,4 +387,408 @@ public class Driver implements java.sql.Driver {
public static boolean isRegistered() {
return registeredDriver != null;
}
private Properties getDefaultProperties() throws IOException {
try (ResourceLock ignore = lock.obtain()) {
if (defaultProperties != null) {
return defaultProperties;
}
// Make sure we load properties with the maximum possible privileges.
try {
defaultProperties =
doPrivileged(new PrivilegedExceptionAction<Properties>() {
@Override
public Properties run() throws IOException {
return loadDefaultProperties();
}
});
} catch (PrivilegedActionException e) {
Exception ex = e.getException();
if (ex instanceof IOException) {
throw (IOException) ex;
}
throw new RuntimeException(e);
} catch (Throwable e) {
if (e instanceof IOException) {
throw (IOException) e;
}
if (e instanceof RuntimeException) {
throw (RuntimeException) e;
}
if (e instanceof Error) {
throw (Error) e;
}
throw new RuntimeException(e);
}
return defaultProperties;
}
}
private Properties loadDefaultProperties() throws IOException {
Properties merged = new Properties();
try {
PGProperty.USER.set(merged, System.getProperty("user.name"));
} catch (SecurityException se) {
// We're just trying to set a default, so if we can't
// it's not a big deal.
}
// If we are loaded by the bootstrap classloader, getClassLoader()
// may return null. In that case, try to fall back to the system
// classloader.
//
// We should not need to catch SecurityException here as we are
// accessing either our own classloader, or the system classloader
// when our classloader is null. The ClassLoader javadoc claims
// neither case can throw SecurityException.
ClassLoader cl = getClass().getClassLoader();
if (cl == null) {
LOGGER.log(Level.FINE, "Can't find our classloader for the Driver; "
+ "attempt to use the system class loader");
cl = ClassLoader.getSystemClassLoader();
}
if (cl == null) {
LOGGER.log(Level.WARNING, "Can't find a classloader for the Driver; not loading driver "
+ "configuration from org/postgresql/driverconfig.properties");
return merged; // Give up on finding defaults.
}
LOGGER.log(Level.FINE, "Loading driver configuration via classloader {0}", cl);
// When loading the driver config files we don't want settings found
// in later files in the classpath to override settings specified in
// earlier files. To do this we've got to read the returned
// Enumeration into temporary storage.
ArrayList<URL> urls = new ArrayList<>();
Enumeration<URL> urlEnum = cl.getResources("org/postgresql/driverconfig.properties");
while (urlEnum.hasMoreElements()) {
urls.add(urlEnum.nextElement());
}
for (int i = urls.size() - 1; i >= 0; i--) {
URL url = urls.get(i);
LOGGER.log(Level.FINE, "Loading driver configuration from: {0}", url);
InputStream is = url.openStream();
merged.load(is);
is.close();
}
return merged;
}
/**
* <p>Try to make a database connection to the given URL. The driver should return "null" if it
* realizes it is the wrong kind of driver to connect to the given URL. This will be common, as
* when the JDBC driverManager is asked to connect to a given URL, it passes the URL to each
* loaded driver in turn.</p>
*
* <p>The driver should raise an SQLException if it is the right driver to connect to the given URL,
* but has trouble connecting to the database.</p>
*
* <p>The java.util.Properties argument can be used to pass arbitrary string tag/value pairs as
* connection arguments.</p>
*
* <ul>
* <li>user - (required) The user to connect as</li>
* <li>password - (optional) The password for the user</li>
* <li>ssl -(optional) Use SSL when connecting to the server</li>
* <li>readOnly - (optional) Set connection to read-only by default</li>
* <li>charSet - (optional) The character set to be used for converting to/from
* the database to unicode. If multibyte is enabled on the server then the character set of the
* database is used as the default, otherwise the jvm character encoding is used as the default.
* This value is only used when connecting to a 7.2 or older server.</li>
* <li>loglevel - (optional) Enable logging of messages from the driver. The value is an integer
* from 0 to 2 where: OFF = 0, INFO =1, DEBUG = 2 The output is sent to
* DriverManager.getPrintWriter() if set, otherwise it is sent to System.out.</li>
* <li>compatible - (optional) This is used to toggle between different functionality
* as it changes across different releases of the jdbc driver code. The values here are versions
* of the jdbc client and not server versions. For example in 7.1 get/setBytes worked on
* LargeObject values, in 7.2 these methods were changed to work on bytea values. This change in
* functionality could be disabled by setting the compatible level to be "7.1", in which case the
* driver will revert to the 7.1 functionality.</li>
* </ul>
*
* <p>Normally, at least "user" and "password" properties should be included in the properties. For a
* list of supported character encoding , see
* http://java.sun.com/products/jdk/1.2/docs/guide/internat/encoding.doc.html Note that you will
* probably want to have set up the Postgres database itself to use the same encoding, with the
* {@code -E <encoding>} argument to createdb.</p>
*
* <p>Our protocol takes the forms:</p>
*
* <pre>
* jdbc:postgresql://host:port/database?param1=val1&amp;...
* </pre>
*
* @param url the URL of the database to connect to
* @param info a list of arbitrary tag/value pairs as connection arguments
* @return a connection to the URL or null if it isnt us
* @throws SQLException if a database access error occurs or the url is
* {@code null}
* @see java.sql.Driver#connect
*/
@Override
public Connection connect(String url, Properties info) throws SQLException {
if (url == null) {
throw new SQLException("url is null");
}
// get defaults
Properties defaults;
if (!url.startsWith("jdbc:postgresql:")) {
return null;
}
try {
defaults = getDefaultProperties();
} catch (IOException ioe) {
throw new PSQLException(GT.tr("Error loading default settings from driverconfig.properties"),
PSQLState.UNEXPECTED_ERROR, ioe);
}
// override defaults with provided properties
Properties props = new Properties(defaults);
if (info != null) {
Set<String> e = info.stringPropertyNames();
for (String propName : e) {
String propValue = info.getProperty(propName);
if (propValue == null) {
throw new PSQLException(
GT.tr("Properties for the driver contains a non-string value for the key ")
+ propName,
PSQLState.UNEXPECTED_ERROR);
}
props.setProperty(propName, propValue);
}
}
// parse URL and add more properties
if ((props = parseURL(url, props)) == null) {
throw new PSQLException(
GT.tr("Unable to parse URL {0}", url),
PSQLState.UNEXPECTED_ERROR);
}
try {
LOGGER.log(Level.FINE, "Connecting with URL: {0}", url);
// Enforce login timeout, if specified, by running the connection
// attempt in a separate thread. If we hit the timeout without the
// connection completing, we abandon the connection attempt in
// the calling thread, but the separate thread will keep trying.
// Eventually, the separate thread will either fail or complete
// the connection; at that point we clean up the connection if
// we managed to establish one after all. See ConnectThread for
// more details.
long timeout = timeout(props);
if (timeout <= 0) {
return makeConnection(url, props);
}
ConnectThread ct = new ConnectThread(url, props);
Thread thread = new Thread(ct, "PostgreSQL JDBC driver connection thread");
thread.setDaemon(true); // Don't prevent the VM from shutting down
thread.start();
return ct.getResult(timeout);
} catch (PSQLException ex1) {
LOGGER.log(Level.FINE, "Connection error: ", ex1);
// re-throw the exception, otherwise it will be caught next, and a
// org.postgresql.unusual error will be returned instead.
throw ex1;
} catch (Exception ex2) {
if ("java.security.AccessControlException".equals(ex2.getClass().getName())) {
// java.security.AccessControlException has been deprecated for removal, so compare the class name
throw new PSQLException(
GT.tr(
"Your security policy has prevented the connection from being attempted. You probably need to grant the connect java.net.SocketPermission to the database server host and port that you wish to connect to."),
PSQLState.UNEXPECTED_ERROR, ex2);
}
LOGGER.log(Level.FINE, "Unexpected connection error: ", ex2);
throw new PSQLException(
GT.tr(
"Something unusual has occurred to cause the driver to fail. Please report this exception."),
PSQLState.UNEXPECTED_ERROR, ex2);
}
}
/**
* this is an empty method left here for graalvm
* we removed the ability to setup the logger from properties
* due to a security issue
*
* @param props Connection Properties
*/
private void setupLoggerFromProperties(final Properties props) {
}
/**
* Returns true if the driver thinks it can open a connection to the given URL. Typically, drivers
* will return true if they understand the subprotocol specified in the URL and false if they
* don't. Our protocols start with jdbc:postgresql:
*
* @param url the URL of the driver
* @return true if this driver accepts the given URL
* @see java.sql.Driver#acceptsURL
*/
@Override
public boolean acceptsURL(String url) {
return parseURL(url, null) != null;
}
/**
* <p>The getPropertyInfo method is intended to allow a generic GUI tool to discover what properties
* it should prompt a human for in order to get enough information to connect to a database.</p>
*
* <p>Note that depending on the values the human has supplied so far, additional values may become
* necessary, so it may be necessary to iterate through several calls to getPropertyInfo</p>
*
* @param url the Url of the database to connect to
* @param info a proposed list of tag/value pairs that will be sent on connect open.
* @return An array of DriverPropertyInfo objects describing possible properties. This array may
* be an empty array if no properties are required
* @see java.sql.Driver#getPropertyInfo
*/
@Override
public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) {
Properties copy = new Properties(info);
Properties parse = parseURL(url, copy);
if (parse != null) {
copy = parse;
}
PGProperty[] knownProperties = PGProperty.values();
DriverPropertyInfo[] props = new DriverPropertyInfo[knownProperties.length];
for (int i = 0; i < props.length; i++) {
props[i] = knownProperties[i].toDriverPropertyInfo(copy);
}
return props;
}
@Override
public int getMajorVersion() {
return DriverInfo.MAJOR_VERSION;
}
@Override
public int getMinorVersion() {
return DriverInfo.MINOR_VERSION;
}
/**
* <p>Report whether the driver is a genuine JDBC compliant driver. A driver may only report "true"
* here if it passes the JDBC compliance tests, otherwise it is required to return false. JDBC
* compliance requires full support for the JDBC API and full support for SQL 92 Entry Level.</p>
*
* <p>For PostgreSQL, this is not yet possible, as we are not SQL92 compliant (yet).</p>
*/
@Override
public boolean jdbcCompliant() {
return false;
}
@Override
public Logger getParentLogger() {
return PARENT_LOGGER;
}
/**
* Perform a connect in a separate thread; supports getting the results from the original thread
* while enforcing a login timeout.
*/
private static class ConnectThread implements Runnable {
private final ResourceLock lock = new ResourceLock();
private final Condition lockCondition = lock.newCondition();
private final String url;
private final Properties props;
private Connection result;
private Throwable resultException;
private boolean abandoned;
ConnectThread(String url, Properties props) {
this.url = url;
this.props = props;
}
@Override
public void run() {
Connection conn;
Throwable error;
try {
conn = makeConnection(url, props);
error = null;
} catch (Throwable t) {
conn = null;
error = t;
}
try (ResourceLock ignore = lock.obtain()) {
if (abandoned) {
if (conn != null) {
try {
conn.close();
} catch (SQLException e) {
}
}
} else {
result = conn;
resultException = error;
lockCondition.signal();
}
}
}
/**
* Get the connection result from this (assumed running) thread. If the timeout is reached
* without a result being available, a SQLException is thrown.
*
* @param timeout timeout in milliseconds
* @return the new connection, if successful
* @throws SQLException if a connection error occurs or the timeout is reached
*/
public Connection getResult(long timeout) throws SQLException {
long expiry = TimeUnit.NANOSECONDS.toMillis(System.nanoTime()) + timeout;
try (ResourceLock ignore = lock.obtain()) {
while (true) {
if (result != null) {
return result;
}
Throwable resultException = this.resultException;
if (resultException != null) {
if (resultException instanceof SQLException) {
resultException.fillInStackTrace();
throw (SQLException) resultException;
} else {
throw new PSQLException(
GT.tr(
"Something unusual has occurred to cause the driver to fail. Please report this exception."),
PSQLState.UNEXPECTED_ERROR, resultException);
}
}
long delay = expiry - TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
if (delay <= 0) {
abandoned = true;
throw new PSQLException(GT.tr("Connection attempt timed out."),
PSQLState.CONNECTION_UNABLE_TO_CONNECT);
}
try {
lockCondition.await(delay, TimeUnit.MILLISECONDS);
} catch (InterruptedException ie) {
// reset the interrupt flag
Thread.currentThread().interrupt();
abandoned = true;
// throw an unchecked exception which will hopefully not be ignored by the calling code
throw new RuntimeException(GT.tr("Interrupted while attempting to connect."));
}
}
}
}
}
}
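As a usage note for the connect() contract documented in this class, here is a minimal sketch of opening a connection through java.sql.DriverManager. The host, port, database and credentials are placeholders, and the loginTimeout property (in seconds) is assumed to be what routes the attempt through the ConnectThread-based timeout handling shown above.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.Properties;

public final class ConnectExample {
    public static void main(String[] args) throws SQLException {
        Properties props = new Properties();
        props.setProperty("user", "test");         // required, see the connect() javadoc
        props.setProperty("password", "test");     // optional
        props.setProperty("loginTimeout", "10");   // seconds; assumed to enable the timeout thread path
        String url = "jdbc:postgresql://localhost:5432/test";
        try (Connection connection = DriverManager.getConnection(url, props)) {
            System.out.println("connected, autocommit=" + connection.getAutoCommit());
        }
    }
}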


@ -37,15 +37,12 @@ public interface PGConnection {
* {@link java.sql.Connection#createArrayOf(String, Object[])}, but also
* provides support for primitive arrays.
*
* @param typeName
* The SQL name of the type to map the <i>elements</i> to.
* @param typeName The SQL name of the type to map the <i>elements</i> to.
* Must not be {@code null}.
* @param elements
* The array of objects to map. A {@code null} value will result in
* @param elements The array of objects to map. A {@code null} value will result in
* an {@link Array} representing {@code null}.
* @return An {@link Array} wrapping <i>elements</i>.
* @throws SQLException
* If for some reason the array cannot be created.
* @throws SQLException If for some reason the array cannot be created.
* @see java.sql.Connection#createArrayOf(String, Object[])
*/
Array createArrayOf(String typeName, Object elements) throws SQLException;
@ -147,15 +144,6 @@ public interface PGConnection {
*/
void addDataType(String type, Class<? extends PGobject> klass) throws SQLException;
/**
* Set the default statement reuse threshold before enabling server-side prepare. See
* {@link org.postgresql.PGStatement#setPrepareThreshold(int)} for details.
*
* @param threshold the new threshold
* @since build 302
*/
void setPrepareThreshold(int threshold);
/**
* Get the default server-side prepare reuse threshold for statements created from this
* connection.
@ -166,13 +154,13 @@ public interface PGConnection {
int getPrepareThreshold();
/**
* Set the default fetch size for statements created from this connection.
* Set the default statement reuse threshold before enabling server-side prepare. See
* {@link org.postgresql.PGStatement#setPrepareThreshold(int)} for details.
*
* @param fetchSize new default fetch size
* @throws SQLException if specified negative <code>fetchSize</code> parameter
* @see Statement#setFetchSize(int)
* @param threshold the new threshold
* @since build 302
*/
void setDefaultFetchSize(int fetchSize) throws SQLException;
void setPrepareThreshold(int threshold);
/**
* Get the default fetch size for statements created from this connection.
@ -183,6 +171,15 @@ public interface PGConnection {
*/
int getDefaultFetchSize();
/**
* Set the default fetch size for statements created from this connection.
*
* @param fetchSize new default fetch size
* @throws SQLException if specified negative <code>fetchSize</code> parameter
* @see Statement#setFetchSize(int)
*/
void setDefaultFetchSize(int fetchSize) throws SQLException;
/**
* Return the process ID (PID) of the backend server process handling this connection.
*
@ -192,6 +189,7 @@ public interface PGConnection {
/**
* Sends a query cancellation for this connection.
*
* @throws SQLException if there are problems cancelling the query
*/
void cancelQuery() throws SQLException;
@ -240,6 +238,7 @@ public interface PGConnection {
/**
* Configures if connection should use automatic savepoints.
*
* @param autoSave connection configuration regarding automatic per-query savepoints
* @see PGProperty#AUTOSAVE
*/
@ -364,6 +363,13 @@ public interface PGConnection {
*/
String getParameterStatus(String parameterName);
/**
* Get state of adaptive fetch for connection.
*
* @return state of adaptive fetch (turned on or off)
*/
boolean getAdaptiveFetch();
/**
* Turn on/off adaptive fetch for connection. Existing statements and resultSets won't be affected
* by change here.
@ -371,11 +377,4 @@ public interface PGConnection {
* @param adaptiveFetch desired state of adaptive fetch.
*/
void setAdaptiveFetch(boolean adaptiveFetch);
/**
* Get state of adaptive fetch for connection.
*
* @return state of adaptive fetch (turned on or off)
*/
boolean getAdaptiveFetch();
}
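A short sketch of how the PGConnection extension methods reordered in this hunk are typically reached from plain JDBC code, by unwrapping a java.sql.Connection. The threshold and fetch-size values are arbitrary examples, not recommendations.

import java.sql.Connection;
import java.sql.SQLException;

import org.postgresql.PGConnection;

final class PGConnectionTuning {

    static void tune(Connection connection) throws SQLException {
        // Reach the driver-specific interface behind a plain JDBC connection.
        PGConnection pgConnection = connection.unwrap(PGConnection.class);
        pgConnection.setPrepareThreshold(5);     // switch to server-side prepare after 5 executions
        pgConnection.setDefaultFetchSize(100);   // default fetch size for statements created later
        pgConnection.setAdaptiveFetch(true);     // existing statements/result sets are not affected
        System.out.println("adaptive fetch enabled: " + pgConnection.getAdaptiveFetch());
    }
}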


@ -58,16 +58,6 @@ public enum PGEnvironment {
"Specifies the directory containing the PGSERVICEFILE file"),
;
private final String name;
private final String defaultValue;
private final String description;
PGEnvironment(String name, String defaultValue, String description) {
this.name = name;
this.defaultValue = defaultValue;
this.description = description;
}
private static final Map<String, PGEnvironment> PROPS_BY_NAME = new HashMap<>();
static {
@ -78,6 +68,16 @@ public enum PGEnvironment {
}
}
private final String name;
private final String defaultValue;
private final String description;
PGEnvironment(String name, String defaultValue, String description) {
this.name = name;
this.defaultValue = defaultValue;
this.description = description;
}
/**
* Returns the name of the parameter.
*


@ -326,6 +326,7 @@ public enum PGProperty {
/**
* This property is no longer used by the driver and will be ignored.
*
* @deprecated Logging is configured via java.util.logging.
*/
@Deprecated
@ -336,6 +337,7 @@ public enum PGProperty {
/**
* This property is no longer used by the driver and will be ignored.
*
* @deprecated Logging is configured via java.util.logging.
*/
@Deprecated
@ -625,6 +627,7 @@ public enum PGProperty {
/**
* The String argument to give to the constructor of the SSL Factory.
*
* @deprecated use {@code ..Factory(Properties)} constructor.
*/
@Deprecated
@ -724,7 +727,7 @@ public enum PGProperty {
"any",
"Specifies what kind of server to connect",
false,
new String []{"any", "primary", "master", "slave", "secondary", "preferSlave", "preferSecondary", "preferPrimary"}),
new String[]{"any", "primary", "master", "slave", "secondary", "preferSlave", "preferSecondary", "preferPrimary"}),
/**
* Enable or disable TCP keep-alive. The default is {@code false}.
@ -777,11 +780,21 @@ public enum PGProperty {
;
private static final Map<String, PGProperty> PROPS_BY_NAME = new HashMap<>();
static {
for (PGProperty prop : PGProperty.values()) {
if (PROPS_BY_NAME.put(prop.getName(), prop) != null) {
throw new IllegalStateException("Duplicate PGProperty name: " + prop.getName());
}
}
}
private final String name;
private final String defaultValue;
private final boolean required;
private final String description;
private final String [] choices;
private final String[] choices;
PGProperty(String name, String defaultValue, String description) {
this(name, defaultValue, description, false);
@ -792,7 +805,7 @@ public enum PGProperty {
}
PGProperty(String name, String defaultValue, String description, boolean required,
String [] choices) {
String[] choices) {
this.name = name;
this.defaultValue = defaultValue;
this.required = required;
@ -800,14 +813,8 @@ public enum PGProperty {
this.choices = choices;
}
private static final Map<String, PGProperty> PROPS_BY_NAME = new HashMap<>();
static {
for (PGProperty prop : PGProperty.values()) {
if (PROPS_BY_NAME.put(prop.getName(), prop) != null) {
throw new IllegalStateException("Duplicate PGProperty name: " + prop.getName());
}
}
public static PGProperty forName(String name) {
return PROPS_BY_NAME.get(name);
}
/**
@ -852,7 +859,7 @@ public enum PGProperty {
*
* @return the available values for this connection parameter or null
*/
public String [] getChoices() {
public String[] getChoices() {
return choices;
}
@ -870,6 +877,7 @@ public enum PGProperty {
/**
* Returns the value of the connection parameter from the given {@link Properties} or the
* default value
*
* @param properties properties to take actual value from
* @return evaluated value for this connection parameter or null
* @deprecated use {@link #getOrDefault(Properties)} instead
@ -882,6 +890,7 @@ public enum PGProperty {
/**
* Returns the value of the connection parameter from the given {@link Properties} or null if there
* is no default value
*
* @param properties properties object to get value from
* @return evaluated value for this connection parameter
*/
@ -1010,10 +1019,6 @@ public enum PGProperty {
return propertyInfo;
}
public static PGProperty forName(String name) {
return PROPS_BY_NAME.get(name);
}
/**
* Return the property if exists but avoiding the default. Allowing the caller to detect the lack
* of a property.


@ -32,6 +32,15 @@ public interface PGStatement {
*/
long getLastOID() throws SQLException;
/**
* Checks if this statement will be executed as a server-prepared statement. A return value of
* <code>true</code> indicates that the next execution of the statement will be done as a
* server-prepared statement, assuming the underlying protocol supports it.
*
* @return true if the next reuse of this statement will use a server-prepared statement
*/
boolean isUseServerPrepare();
/**
* Turn on the use of prepared statements in the server (server side prepared statements are
* unrelated to jdbc PreparedStatements) As of build 302, this method is equivalent to
@ -46,13 +55,13 @@ public interface PGStatement {
void setUseServerPrepare(boolean flag) throws SQLException;
/**
* Checks if this statement will be executed as a server-prepared statement. A return value of
* <code>true</code> indicates that the next execution of the statement will be done as a
* server-prepared statement, assuming the underlying protocol supports it.
* Gets the server-side prepare reuse threshold in use for this statement.
*
* @return true if the next reuse of this statement will use a server-prepared statement
* @return the current threshold
* @see #setPrepareThreshold(int)
* @since build 302
*/
boolean isUseServerPrepare();
int getPrepareThreshold();
/**
* <p>Sets the reuse threshold for using server-prepared statements.</p>
@ -72,13 +81,11 @@ public interface PGStatement {
void setPrepareThreshold(int threshold) throws SQLException;
/**
* Gets the server-side prepare reuse threshold in use for this statement.
* Get state of adaptive fetch for statement.
*
* @return the current threshold
* @see #setPrepareThreshold(int)
* @since build 302
* @return state of adaptive fetch (turned on or off)
*/
int getPrepareThreshold();
boolean getAdaptiveFetch();
/**
* Turn on/off adaptive fetch for statement. Existing resultSets won't be affected by change
@ -87,11 +94,4 @@ public interface PGStatement {
* @param adaptiveFetch desired state of adaptive fetch.
*/
void setAdaptiveFetch(boolean adaptiveFetch);
/**
* Get state of adaptive fetch for statement.
*
* @return state of adaptive fetch (turned on or off)
*/
boolean getAdaptiveFetch();
}


@ -5,9 +5,8 @@
package org.postgresql.copy;
import org.postgresql.util.ByteStreamWriter;
import java.sql.SQLException;
import org.postgresql.util.ByteStreamWriter;
/**
* Copy bulk data from client into a PostgreSQL table very fast.


@ -5,6 +5,12 @@
package org.postgresql.copy;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Reader;
import java.io.Writer;
import java.sql.SQLException;
import org.postgresql.core.BaseConnection;
import org.postgresql.core.Encoding;
import org.postgresql.core.QueryExecutor;
@ -13,13 +19,6 @@ import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Reader;
import java.io.Writer;
import java.sql.SQLException;
/**
* API for PostgreSQL COPY bulk data transfer.
*/


@ -14,7 +14,7 @@ public interface CopyOut extends CopyOperation {
* @return byte array received from server, null if server complete copy operation
* @throws SQLException if something goes wrong for example socket timeout
*/
byte [] readFromCopy() throws SQLException;
byte[] readFromCopy() throws SQLException;
/**
* Wait for a row of data to be received from server on an active copy operation.
@ -25,5 +25,5 @@ public interface CopyOut extends CopyOperation {
* blocking mode return null
* @throws SQLException if something goes wrong for example socket timeout
*/
byte [] readFromCopy(boolean block) throws SQLException;
byte[] readFromCopy(boolean block) throws SQLException;
}


@ -5,22 +5,21 @@
package org.postgresql.copy;
import org.postgresql.PGConnection;
import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
import java.io.IOException;
import java.io.InputStream;
import java.sql.SQLException;
import java.util.Arrays;
import org.postgresql.PGConnection;
import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
/**
* InputStream for reading from a PostgreSQL COPY TO STDOUT operation.
*/
public class PGCopyInputStream extends InputStream implements CopyOut {
private CopyOut op;
private byte [] buf;
private byte[] buf;
private int at;
private int len;
@ -48,7 +47,7 @@ public class PGCopyInputStream extends InputStream implements CopyOut {
return op;
}
private byte [] fillBuffer() throws IOException {
private byte[] fillBuffer() throws IOException {
if (at >= len) {
try {
buf = getOp().readFromCopy();
@ -104,7 +103,7 @@ public class PGCopyInputStream extends InputStream implements CopyOut {
}
@Override
public byte [] readFromCopy() throws SQLException {
public byte[] readFromCopy() throws SQLException {
byte[] result = null;
try {
byte[] buf = fillBuffer();
@ -124,7 +123,7 @@ public class PGCopyInputStream extends InputStream implements CopyOut {
}
@Override
public byte [] readFromCopy(boolean block) throws SQLException {
public byte[] readFromCopy(boolean block) throws SQLException {
return readFromCopy();
}
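A brief sketch of driving a COPY ... TO STDOUT transfer through PGCopyInputStream, as described in the class comment above. The table name is a placeholder; the (PGConnection, String) constructor used here is the one provided by the upstream pgjdbc API.

import java.io.IOException;
import java.io.InputStream;
import java.sql.Connection;
import java.sql.SQLException;

import org.postgresql.PGConnection;
import org.postgresql.copy.PGCopyInputStream;

final class CopyOutExample {

    // Streams the raw COPY output and returns the number of bytes received.
    static long copyOutByteCount(Connection connection) throws SQLException, IOException {
        PGConnection pgConnection = connection.unwrap(PGConnection.class);
        try (InputStream in = new PGCopyInputStream(pgConnection, "COPY mytable TO STDOUT")) {
            byte[] buffer = new byte[8192];
            long total = 0;
            int read;
            while ((read = in.read(buffer)) != -1) {
                total += read;
            }
            return total;
        }
    }
}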


@ -5,21 +5,20 @@
package org.postgresql.copy;
import org.postgresql.PGConnection;
import org.postgresql.util.ByteStreamWriter;
import org.postgresql.util.GT;
import java.io.IOException;
import java.io.OutputStream;
import java.sql.SQLException;
import org.postgresql.PGConnection;
import org.postgresql.util.ByteStreamWriter;
import org.postgresql.util.GT;
/**
* OutputStream for buffered input into a PostgreSQL COPY FROM STDIN operation.
*/
public class PGCopyOutputStream extends OutputStream implements CopyIn {
private CopyIn op;
private final byte[] copyBuffer;
private final byte[] singleByteBuffer = new byte[1];
private CopyIn op;
private int at;
/**


@ -29,136 +29,59 @@ import java.util.concurrent.ConcurrentMap;
*
* @author Brett Okken
*/
final class AsciiStringInterner {
private abstract static class BaseKey {
private final int hash;
BaseKey(int hash) {
this.hash = hash;
}
@Override
public final int hashCode() {
return hash;
}
@Override
public final boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof BaseKey)) {
return false;
}
final BaseKey other = (BaseKey) obj;
return equalsBytes(other);
}
abstract boolean equalsBytes(BaseKey other);
abstract boolean equals(byte[] other, int offset, int length);
abstract void appendString(StringBuilder sb);
}
/**
* Only used for lookups, never to actually store entries.
*/
private static class TempKey extends BaseKey {
final byte[] bytes;
final int offset;
final int length;
TempKey(int hash, byte[] bytes, int offset, int length) {
super(hash);
this.bytes = bytes;
this.offset = offset;
this.length = length;
}
@Override
boolean equalsBytes(BaseKey other) {
return other.equals(bytes, offset, length);
}
@Override
public boolean equals(byte[] other, int offset, int length) {
return arrayEquals(this.bytes, this.offset, this.length, other, offset, length);
}
@Override
void appendString(StringBuilder sb) {
for (int i = offset, j = offset + length; i < j; i++) {
sb.append((char) bytes[i]);
}
}
}
/**
* Instance used for inserting values into the cache. The {@code byte[]} must be a copy
* that will never be mutated.
*/
private static final class Key extends BaseKey {
final byte[] key;
Key(byte[] key, int hash) {
super(hash);
this.key = key;
}
/**
* {@inheritDoc}
*/
@Override
boolean equalsBytes(BaseKey other) {
return other.equals(key, 0, key.length);
}
@Override
public boolean equals(byte[] other, int offset, int length) {
return arrayEquals(this.key, 0, this.key.length, other, offset, length);
}
/**
* {@inheritDoc}
*/
@Override
void appendString(StringBuilder sb) {
for (int i = 0; i < key.length; i++) {
sb.append((char) key[i]);
}
}
}
/**
* Custom {@link SoftReference} implementation which maintains a reference to the key in the cache,
* which allows aggressive cleaning when garbage collector collects the {@code String} instance.
*/
private final class StringReference extends SoftReference<String> {
private final BaseKey key;
StringReference(BaseKey key, String referent) {
super(referent, refQueue);
this.key = key;
}
void dispose() {
cache.remove(key, this);
}
}
public final class AsciiStringInterner {
/**
* Contains the canonicalized values, keyed by the ascii {@code byte[]}.
*/
final ConcurrentMap<BaseKey, SoftReference<String>> cache = new ConcurrentHashMap<>(128);
/**
* Used for {@link Reference} as values in {@code cache}.
*/
final ReferenceQueue<String> refQueue = new ReferenceQueue<>();
public AsciiStringInterner() {
}
/**
* Generates a hash value for the relevant entries in <i>bytes</i> as long as all values are ascii ({@code >= 0}).
*
* @return hash code for relevant bytes, or {@code 0} if non-ascii bytes present.
*/
private static int hashKey(byte[] bytes, int offset, int length) {
int result = 1;
for (int i = offset, j = offset + length; i < j; i++) {
final byte b = bytes[i];
// bytes are signed values. all ascii values are positive
if (b < 0) {
return 0;
}
result = 31 * result + b;
}
return result;
}
/**
* Performs equality check between <i>a</i> and <i>b</i> (with corresponding offset/length values).
* <p>
* The {@code static boolean equals(byte[].class, int, int, byte[], int, int} method in {@link java.util.Arrays}
* is optimized for longer {@code byte[]} instances than is expected to be seen here.
* </p>
*/
static boolean arrayEquals(byte[] a, int aOffset, int aLength, byte[] b, int bOffset, int bLength) {
if (aLength != bLength) {
return false;
}
//TODO: in jdk9, could use VarHandle to read 4 bytes at a time as an int for comparison
// or 8 bytes as a long - though we likely expect short values here
for (int i = 0; i < aLength; i++) {
if (a[aOffset + i] != b[bOffset + i]) {
return false;
}
}
return true;
}
/**
* Preemptively populates a value into the cache. This is intended to be used with {@code String} constants
* which are frequently used. While this can work with other {@code String} values, if <i>val</i> is ever
@ -284,6 +207,7 @@ final class AsciiStringInterner {
/**
* Process any entries in {@link #refQueue} to purge from the {@link #cache}.
*
* @see StringReference#dispose()
*/
private void cleanQueue() {
@ -293,44 +217,6 @@ final class AsciiStringInterner {
}
}
/**
* Generates a hash value for the relevant entries in <i>bytes</i> as long as all values are ascii ({@code >= 0}).
* @return hash code for relevant bytes, or {@code 0} if non-ascii bytes present.
*/
private static int hashKey(byte[] bytes, int offset, int length) {
int result = 1;
for (int i = offset, j = offset + length; i < j; i++) {
final byte b = bytes[i];
// bytes are signed values. all ascii values are positive
if (b < 0) {
return 0;
}
result = 31 * result + b;
}
return result;
}
/**
* Performs equality check between <i>a</i> and <i>b</i> (with corresponding offset/length values).
* <p>
* The {@code static boolean equals(byte[].class, int, int, byte[], int, int} method in {@link java.util.Arrays}
* is optimized for longer {@code byte[]} instances than is expected to be seen here.
* </p>
*/
static boolean arrayEquals(byte[] a, int aOffset, int aLength, byte[] b, int bOffset, int bLength) {
if (aLength != bLength) {
return false;
}
//TODO: in jdk9, could use VarHandle to read 4 bytes at a time as an int for comparison
// or 8 bytes as a long - though we likely expect short values here
for (int i = 0; i < aLength; i++) {
if (a[aOffset + i] != b[bOffset + i]) {
return false;
}
}
return true;
}
/**
* {@inheritDoc}
*/
@ -351,4 +237,122 @@ final class AsciiStringInterner {
sb.append(']');
return sb.toString();
}
private abstract static class BaseKey {
private final int hash;
BaseKey(int hash) {
this.hash = hash;
}
@Override
public final int hashCode() {
return hash;
}
@Override
public final boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof BaseKey)) {
return false;
}
final BaseKey other = (BaseKey) obj;
return equalsBytes(other);
}
abstract boolean equalsBytes(BaseKey other);
abstract boolean equals(byte[] other, int offset, int length);
abstract void appendString(StringBuilder sb);
}
/**
* Only used for lookups, never to actually store entries.
*/
private static class TempKey extends BaseKey {
final byte[] bytes;
final int offset;
final int length;
TempKey(int hash, byte[] bytes, int offset, int length) {
super(hash);
this.bytes = bytes;
this.offset = offset;
this.length = length;
}
@Override
boolean equalsBytes(BaseKey other) {
return other.equals(bytes, offset, length);
}
@Override
public boolean equals(byte[] other, int offset, int length) {
return arrayEquals(this.bytes, this.offset, this.length, other, offset, length);
}
@Override
void appendString(StringBuilder sb) {
for (int i = offset, j = offset + length; i < j; i++) {
sb.append((char) bytes[i]);
}
}
}
/**
* Instance used for inserting values into the cache. The {@code byte[]} must be a copy
* that will never be mutated.
*/
private static final class Key extends BaseKey {
final byte[] key;
Key(byte[] key, int hash) {
super(hash);
this.key = key;
}
/**
* {@inheritDoc}
*/
@Override
boolean equalsBytes(BaseKey other) {
return other.equals(key, 0, key.length);
}
@Override
public boolean equals(byte[] other, int offset, int length) {
return arrayEquals(this.key, 0, this.key.length, other, offset, length);
}
/**
* {@inheritDoc}
*/
@Override
void appendString(StringBuilder sb) {
for (int i = 0; i < key.length; i++) {
sb.append((char) key[i]);
}
}
}
/**
* Custom {@link SoftReference} implementation which maintains a reference to the key in the cache,
* which allows aggressive cleaning when garbage collector collects the {@code String} instance.
*/
private final class StringReference extends SoftReference<String> {
private final BaseKey key;
StringReference(BaseKey key, String referent) {
super(referent, refQueue);
this.key = key;
}
void dispose() {
cache.remove(key, this);
}
}
}


@ -5,6 +5,11 @@
package org.postgresql.core;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.TimerTask;
import java.util.logging.Logger;
import org.postgresql.PGConnection;
import org.postgresql.PGProperty;
import org.postgresql.jdbc.FieldMetadata;
@ -12,12 +17,6 @@ import org.postgresql.jdbc.TimestampUtils;
import org.postgresql.util.LruCache;
import org.postgresql.xml.PGXmlFactoryFactory;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.TimerTask;
import java.util.logging.Logger;
/**
* Driver-internal connection interface. Application code should not use this interface.
*/
@ -76,13 +75,14 @@ public interface BaseConnection extends PGConnection, Connection {
* {@link org.postgresql.util.PGobject} instance is returned.</p>
*
* <p>value or byteValue must be non-null</p>
*
* @param type the backend typename
* @param value the type-specific string representation of the value
* @param byteValue the type-specific binary representation of the value
* @return an appropriate object; never null.
* @throws SQLException if something goes wrong
*/
Object getObject(String type, String value, byte [] byteValue)
Object getObject(String type, String value, byte[] byteValue)
throws SQLException;
Encoding getEncoding() throws SQLException;


@ -5,12 +5,11 @@
package org.postgresql.core;
import org.postgresql.PGStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.List;
import org.postgresql.PGStatement;
/**
* Driver-internal statement interface. Application code should not use this interface.


@ -5,11 +5,10 @@
package org.postgresql.core;
import org.postgresql.jdbc.PreferQueryMode;
import org.postgresql.util.LruCache;
import java.sql.SQLException;
import java.util.List;
import org.postgresql.jdbc.PreferQueryMode;
import org.postgresql.util.LruCache;
/**
* Creates an instance of {@link CachedQuery} for a given connection.


@ -27,7 +27,7 @@ public final class CommandCompleteParser {
return rows;
}
void set(long oid, long rows) {
public void set(long oid, long rows) {
this.oid = oid;
this.rows = rows;
}


@ -6,6 +6,11 @@
package org.postgresql.core;
import java.io.IOException;
import java.sql.SQLException;
import java.util.Properties;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.postgresql.PGProperty;
import org.postgresql.core.v3.ConnectionFactoryImpl;
import org.postgresql.util.GT;
@ -13,12 +18,6 @@ import org.postgresql.util.HostSpec;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
import java.io.IOException;
import java.sql.SQLException;
import java.util.Properties;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Handles protocol-specific connection setup.
*

View file

@ -23,12 +23,10 @@ import java.util.logging.Logger;
*/
public class Encoding {
static final AsciiStringInterner INTERNER = new AsciiStringInterner();
private static final Logger LOGGER = Logger.getLogger(Encoding.class.getName());
private static final Encoding DEFAULT_ENCODING = new Encoding();
private static final Encoding UTF8_ENCODING = new Encoding(StandardCharsets.UTF_8, true);
/*
* Preferred JVM encodings for backend encodings.
*/
@ -78,8 +76,6 @@ public class Encoding {
encodings.put("LATIN10", new String[0]);
}
static final AsciiStringInterner INTERNER = new AsciiStringInterner();
private final Charset encoding;
private final boolean fastASCIINumbers;
@ -119,16 +115,6 @@ public class Encoding {
this(encoding, testAsciiNumbers(encoding));
}
/**
 * Returns true if this encoding has the characters '-' and '0'..'9' in exactly the same position as
 * ASCII.
*
* @return true if the bytes can be scanned directly for ascii numbers.
*/
public boolean hasAsciiNumbers() {
return fastASCIINumbers;
}
/**
* Construct an Encoding for a given JVM encoding.
*
@ -195,6 +181,42 @@ public class Encoding {
INTERNER.putString(string);
}
/**
* Get an Encoding using the default encoding for the JVM.
*
* @return an Encoding instance
*/
public static Encoding defaultEncoding() {
return DEFAULT_ENCODING;
}
/**
* Checks whether this encoding is compatible with ASCII for the number characters '-' and
 * '0'..'9'. Compatible means that they are encoded with exactly the same values.
*
* @return If faster ASCII number parsing can be used with this encoding.
*/
private static boolean testAsciiNumbers(Charset encoding) {
// TODO: test all postgres supported encoding to see if there are
// any which do _not_ have ascii numbers in same location
// at least all the encoding listed in the encodings hashmap have
// working ascii numbers
String test = "-0123456789";
byte[] bytes = test.getBytes(encoding);
String res = new String(bytes, StandardCharsets.US_ASCII);
return test.equals(res);
}
/**
 * Returns true if this encoding has the characters '-' and '0'..'9' in exactly the same position as
 * ASCII.
*
* @return true if the bytes can be scanned directly for ascii numbers.
*/
public boolean hasAsciiNumbers() {
return fastASCIINumbers;
}
/**
* Get the name of the (JVM) encoding used.
*
@ -211,7 +233,7 @@ public class Encoding {
* @return a bytearray containing the encoded string
* @throws IOException if something goes wrong
*/
public byte [] encode(String s) throws IOException {
public byte[] encode(String s) throws IOException {
if (s == null) {
return null;
}
@ -315,34 +337,8 @@ public class Encoding {
return new OutputStreamWriter(out, encoding);
}
/**
* Get an Encoding using the default encoding for the JVM.
*
* @return an Encoding instance
*/
public static Encoding defaultEncoding() {
return DEFAULT_ENCODING;
}
@Override
public String toString() {
return encoding.name();
}
/**
* Checks whether this encoding is compatible with ASCII for the number characters '-' and
 * '0'..'9'. Compatible means that they are encoded with exactly the same values.
*
* @return If faster ASCII number parsing can be used with this encoding.
*/
private static boolean testAsciiNumbers(Charset encoding) {
// TODO: test all postgres supported encoding to see if there are
// any which do _not_ have ascii numbers in same location
// at least all the encoding listed in the encodings hashmap have
// working ascii numbers
String test = "-0123456789";
byte[] bytes = test.getBytes(encoding);
String res = new String(bytes, StandardCharsets.US_ASCII);
return test.equals(res);
}
}
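The hasAsciiNumbers/testAsciiNumbers pair above boils down to an encode/decode roundtrip: an encoding qualifies for fast numeric scanning when "-0123456789" survives being encoded and then decoded as US-ASCII. A self-contained sketch of the same check, for illustration only:
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
public class AsciiNumberCheck {
    // True when '-' and '0'..'9' are encoded at the same byte values as in ASCII.
    static boolean hasAsciiNumbers(Charset cs) {
        String probe = "-0123456789";
        byte[] bytes = probe.getBytes(cs);
        return probe.equals(new String(bytes, StandardCharsets.US_ASCII));
    }
    public static void main(String[] args) {
        System.out.println(hasAsciiNumbers(StandardCharsets.UTF_8));    // true
        System.out.println(hasAsciiNumbers(Charset.forName("UTF-16"))); // false: BOM plus 2 bytes per char
    }
}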

View file

@ -18,39 +18,6 @@ import java.io.IOException;
*/
public class EncodingPredictor {
public EncodingPredictor() {
}
/**
* In certain cases the encoding is not known for sure (e.g. before authentication).
* In such cases, backend might send messages in "native to database" encoding,
 * thus pgjdbc has to guess the encoding.
*/
public static class DecodeResult {
public final String result;
public final String encoding; // JVM name
DecodeResult(String result, String encoding) {
this.result = result;
this.encoding = encoding;
}
}
static class Translation {
public final String fatalText;
private final String [] texts;
public final String language;
public final String[] encodings;
Translation(String fatalText, String [] texts,
String language, String... encodings) {
this.fatalText = fatalText;
this.texts = texts;
this.language = language;
this.encodings = encodings;
}
}
private static final Translation[] FATAL_TRANSLATIONS =
new Translation[]{
new Translation("ВАЖНО", null, "ru", "WIN", "ALT", "KOI8"),
@ -63,6 +30,9 @@ public class EncodingPredictor {
"LATIN7", "LATIN9"),
};
public EncodingPredictor() {
}
public static DecodeResult decode(byte[] bytes, int offset, int length) {
Encoding defaultEncoding = Encoding.defaultEncoding();
for (Translation tr : FATAL_TRANSLATIONS) {
@ -148,4 +118,34 @@ public class EncodingPredictor {
}
return false;
}
/**
* In certain cases the encoding is not known for sure (e.g. before authentication).
* In such cases, backend might send messages in "native to database" encoding,
 * thus pgjdbc has to guess the encoding.
*/
public static class DecodeResult {
public final String result;
public final String encoding; // JVM name
DecodeResult(String result, String encoding) {
this.result = result;
this.encoding = encoding;
}
}
static class Translation {
public final String fatalText;
public final String language;
public final String[] encodings;
private final String[] texts;
Translation(String fatalText, String[] texts,
String language, String... encodings) {
this.fatalText = fatalText;
this.texts = texts;
this.language = language;
this.encodings = encodings;
}
}
}

View file

@ -5,39 +5,32 @@
package org.postgresql.core;
import org.postgresql.jdbc.FieldMetadata;
import java.util.Locale;
import org.postgresql.jdbc.FieldMetadata;
public class Field {
// The V3 protocol defines two constants for the format of data
public static final int TEXT_FORMAT = 0;
public static final int BINARY_FORMAT = 1;
// New string to avoid clashes with other strings
private static final String NOT_YET_LOADED = new String("pgType is not yet loaded");
private final int length; // Internal Length of this field
private final int oid; // OID of the type
private final int mod; // type modifier of this field
private String columnLabel; // Column label
private int format = TEXT_FORMAT; // In the V3 protocol each field has a format
private final int tableOid; // OID of table ( zero if no table )
// 0 = text, 1 = binary
// In the V2 protocol all fields in a
// binary cursor are binary and all
// others are text
private final int tableOid; // OID of table ( zero if no table )
private final int positionInTable;
private String columnLabel; // Column label
private int format = TEXT_FORMAT; // In the V3 protocol each field has a format
// Cache fields filled in by AbstractJdbc2ResultSetMetaData.fetchFieldMetaData.
// Don't use unless that has been called.
private FieldMetadata metadata;
private int sqlType;
private String pgType = NOT_YET_LOADED;
// New string to avoid clashes with other strings
private static final String NOT_YET_LOADED = new String("pgType is not yet loaded");
/**
* Construct a field based on the information fed to it.
*
@ -62,6 +55,7 @@ public class Field {
/**
* Construct a field based on the information fed to it.
*
* @param columnLabel the column label of the field
* @param oid the OID of the field
* @param length the length of the field
@ -150,22 +144,22 @@ public class Field {
+ ")";
}
public void setSQLType(int sqlType) {
this.sqlType = sqlType;
}
public int getSQLType() {
return sqlType;
}
public void setPGType(String pgType) {
this.pgType = pgType;
public void setSQLType(int sqlType) {
this.sqlType = sqlType;
}
public String getPGType() {
return pgType;
}
public void setPGType(String pgType) {
this.pgType = pgType;
}
public boolean isTypeInitialized() {
return pgType != NOT_YET_LOADED;
}
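The NOT_YET_LOADED sentinel above is compared with != in isTypeInitialized(), which only works because it is built with new String(...) and therefore can never be the same object as an interned or user-supplied string of equal content. A small demonstration of the idea, with illustrative names:
final class SentinelDemo {
    // Deliberately a fresh object: reference identity distinguishes "never set"
    // from any real value, even one with identical content.
    private static final String NOT_SET = new String("not set");
    static boolean isInitialized(String value) {
        return value != NOT_SET; // reference comparison, not equals()
    }
    public static void main(String[] args) {
        System.out.println(isInitialized("not set")); // true: equal content, different object
        System.out.println(isInitialized(NOT_SET));   // false: the sentinel itself
    }
}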

View file

@ -14,22 +14,22 @@ public class NativeQuery {
private static final String[] BIND_NAMES = new String[128 * 10];
private static final int[] NO_BINDS = new int[0];
public final String nativeSql;
public final int[] bindPositions;
public final SqlCommand command;
public final boolean multiStatement;
static {
for (int i = 1; i < BIND_NAMES.length; i++) {
BIND_NAMES[i] = "$" + i;
}
}
public final String nativeSql;
public final int[] bindPositions;
public final SqlCommand command;
public final boolean multiStatement;
public NativeQuery(String nativeSql, SqlCommand dml) {
this(nativeSql, NO_BINDS, true, dml);
}
public NativeQuery(String nativeSql, int [] bindPositions, boolean multiStatement, SqlCommand dml) {
public NativeQuery(String nativeSql, int[] bindPositions, boolean multiStatement, SqlCommand dml) {
this.nativeSql = nativeSql;
this.bindPositions =
bindPositions == null || bindPositions.length == 0 ? NO_BINDS : bindPositions;
@ -37,37 +37,6 @@ public class NativeQuery {
this.command = dml;
}
/**
* Stringize this query to a human-readable form, substituting particular parameter values for
* parameter placeholders.
*
* @param parameters a ParameterList returned by this Query's {@link Query#createParameterList}
* method, or {@code null} to leave the parameter placeholders unsubstituted.
* @return a human-readable representation of this query
*/
public String toString(ParameterList parameters) {
if (bindPositions.length == 0) {
return nativeSql;
}
int queryLength = nativeSql.length();
String[] params = new String[bindPositions.length];
for (int i = 1; i <= bindPositions.length; i++) {
String param = parameters == null ? "?" : parameters.toString(i, true);
params[i - 1] = param;
queryLength += param.length() - bindName(i).length();
}
StringBuilder sbuf = new StringBuilder(queryLength);
sbuf.append(nativeSql, 0, bindPositions[0]);
for (int i = 1; i <= bindPositions.length; i++) {
sbuf.append(params[i - 1]);
int nextBind = i < bindPositions.length ? bindPositions[i] : nativeSql.length();
sbuf.append(nativeSql, bindPositions[i - 1] + bindName(i).length(), nextBind);
}
return sbuf.toString();
}
/**
* Returns $1, $2, etc names of bind variables used by backend.
*
@ -110,6 +79,37 @@ public class NativeQuery {
return res;
}
/**
* Stringize this query to a human-readable form, substituting particular parameter values for
* parameter placeholders.
*
* @param parameters a ParameterList returned by this Query's {@link Query#createParameterList}
* method, or {@code null} to leave the parameter placeholders unsubstituted.
* @return a human-readable representation of this query
*/
public String toString(ParameterList parameters) {
if (bindPositions.length == 0) {
return nativeSql;
}
int queryLength = nativeSql.length();
String[] params = new String[bindPositions.length];
for (int i = 1; i <= bindPositions.length; i++) {
String param = parameters == null ? "?" : parameters.toString(i, true);
params[i - 1] = param;
queryLength += param.length() - bindName(i).length();
}
StringBuilder sbuf = new StringBuilder(queryLength);
sbuf.append(nativeSql, 0, bindPositions[0]);
for (int i = 1; i <= bindPositions.length; i++) {
sbuf.append(params[i - 1]);
int nextBind = i < bindPositions.length ? bindPositions[i] : nativeSql.length();
sbuf.append(nativeSql, bindPositions[i - 1] + bindName(i).length(), nextBind);
}
return sbuf.toString();
}
public SqlCommand getCommand() {
return command;
}
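toString(ParameterList) above splices parameter text into the native SQL at the recorded bind positions, compensating for the width of each $n placeholder. A stripped-down sketch of the same splice, with parameter text supplied as a plain string array instead of a ParameterList (illustrative, not driver API):
// Assumes params.length == bindPositions.length and each position points at a '$' character.
static String substitute(String nativeSql, int[] bindPositions, String[] params) {
    if (bindPositions.length == 0) {
        return nativeSql;
    }
    StringBuilder sb = new StringBuilder();
    sb.append(nativeSql, 0, bindPositions[0]);
    for (int i = 1; i <= bindPositions.length; i++) {
        sb.append(params[i - 1]);
        int nextBind = i < bindPositions.length ? bindPositions[i] : nativeSql.length();
        sb.append(nativeSql, bindPositions[i - 1] + ("$" + i).length(), nextBind);
    }
    return sb.toString();
}
// substitute("SELECT $1 + $2", new int[]{7, 12}, new String[]{"1", "2"}) returns "SELECT 1 + 2"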

View file

@ -5,14 +5,13 @@
package org.postgresql.core;
import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
import java.lang.reflect.Field;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
/**
* Provides constants for well-known backend OIDs for the types we commonly use.

View file

@ -5,18 +5,6 @@
package org.postgresql.core;
import org.postgresql.gss.GSSInputStream;
import org.postgresql.gss.GSSOutputStream;
import org.postgresql.util.ByteStreamWriter;
import org.postgresql.util.GT;
import org.postgresql.util.HostSpec;
import org.postgresql.util.PGPropertyMaxResultBufferParser;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
import org.ietf.jgss.GSSContext;
import org.ietf.jgss.MessageProp;
import java.io.BufferedOutputStream;
import java.io.Closeable;
import java.io.EOFException;
@ -32,8 +20,17 @@ import java.net.Socket;
import java.net.SocketException;
import java.net.SocketTimeoutException;
import java.sql.SQLException;
import javax.net.SocketFactory;
import org.ietf.jgss.GSSContext;
import org.ietf.jgss.MessageProp;
import org.postgresql.gss.GSSInputStream;
import org.postgresql.gss.GSSOutputStream;
import org.postgresql.util.ByteStreamWriter;
import org.postgresql.util.GT;
import org.postgresql.util.HostSpec;
import org.postgresql.util.PGPropertyMaxResultBufferParser;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
/**
* <p>Wrapper around the raw connection to the server that implements some basic primitives
@ -48,39 +45,20 @@ public class PGStream implements Closeable, Flushable {
private final byte[] int4Buf;
private final byte[] int2Buf;
boolean gssEncrypted;
private Socket connection;
private VisibleBufferedInputStream pgInput;
private OutputStream pgOutput;
private byte [] streamBuffer;
public boolean isGssEncrypted() {
return gssEncrypted;
}
boolean gssEncrypted;
public void setSecContext(GSSContext secContext) {
MessageProp messageProp = new MessageProp(0, true);
pgInput = new VisibleBufferedInputStream(new GSSInputStream(pgInput.getWrapped(), secContext, messageProp ), 8192);
pgOutput = new GSSOutputStream(pgOutput, secContext, messageProp, 16384);
gssEncrypted = true;
}
private byte[] streamBuffer;
private long nextStreamAvailableCheckTime;
// This is a workaround for SSL sockets: sslInputStream.available() might return 0
// so we perform "1ms reads" once in a while
private int minStreamAvailableCheckDelay = 1000;
private Encoding encoding;
private Writer encodingWriter;
private long maxResultBuffer = -1;
private long resultBufferByteCount;
private int maxRowSizeBytes = -1;
/**
* Constructor: Connect to the PostgreSQL back end and return a stream connection.
*
@ -124,7 +102,7 @@ public class PGStream implements Closeable, Flushable {
keepAlive = pgStream.getSocket().getKeepAlive();
tcpNoDelay = pgStream.getSocket().getTcpNoDelay();
} catch ( SocketException ex ) {
} catch (SocketException ex) {
// ignore it
}
//close the existing stream
@ -161,6 +139,18 @@ public class PGStream implements Closeable, Flushable {
this(socketFactory, hostSpec, 0);
}
public boolean isGssEncrypted() {
return gssEncrypted;
}
public void setSecContext(GSSContext secContext) {
MessageProp messageProp = new MessageProp(0, true);
pgInput = new VisibleBufferedInputStream(new GSSInputStream(pgInput.getWrapped(), secContext, messageProp), 8192);
pgOutput = new GSSOutputStream(pgOutput, secContext, messageProp, 16384);
gssEncrypted = true;
}
public HostSpec getHostSpec() {
return hostSpec;
}
@ -242,11 +232,11 @@ public class PGStream implements Closeable, Flushable {
socket.connect(address, timeout);
}
return socket;
} catch ( Exception ex ) {
} catch (Exception ex) {
if (socket != null) {
try {
socket.close();
} catch ( Exception ex1 ) {
} catch (Exception ex1) {
ex.addSuppressed(ex1);
}
}
@ -754,13 +744,22 @@ public class PGStream implements Closeable, Flushable {
connection.close();
}
public int getNetworkTimeout() throws IOException {
return connection.getSoTimeout();
}
public void setNetworkTimeout(int milliseconds) throws IOException {
connection.setSoTimeout(milliseconds);
pgInput.setTimeoutRequested(milliseconds != 0);
}
public int getNetworkTimeout() throws IOException {
return connection.getSoTimeout();
/**
* Get MaxResultBuffer from PGStream.
*
* @return size of MaxResultBuffer
*/
public long getMaxResultBuffer() {
return maxResultBuffer;
}
/**
@ -775,12 +774,12 @@ public class PGStream implements Closeable, Flushable {
}
/**
* Get MaxResultBuffer from PGStream.
* Get actual max row size noticed so far.
*
* @return size of MaxResultBuffer
* @return value of max row size
*/
public long getMaxResultBuffer() {
return maxResultBuffer;
public int getMaxRowSizeBytes() {
return maxRowSizeBytes;
}
/**
@ -798,15 +797,6 @@ public class PGStream implements Closeable, Flushable {
}
}
/**
* Get actual max row size noticed so far.
*
* @return value of max row size
*/
public int getMaxRowSizeBytes() {
return maxRowSizeBytes;
}
/**
* Clear value of max row size noticed so far.
*/

View file

@ -6,10 +6,9 @@
package org.postgresql.core;
import org.postgresql.util.ByteStreamWriter;
import java.io.InputStream;
import java.sql.SQLException;
import org.postgresql.util.ByteStreamWriter;
/**
* <p>Abstraction of a list of parameters to be substituted into a Query. The protocol-specific details
@ -195,14 +194,16 @@ public interface ParameterList {
/**
* Use this operation to append more parameters to the current list.
*
* @param list of parameters to append with.
* @throws SQLException fault raised if driver or back end throw an exception
*/
void appendAll(ParameterList list) throws SQLException ;
void appendAll(ParameterList list) throws SQLException;
/**
* Returns the bound parameter values.
*
* @return Object array containing the parameter values.
*/
Object [] getValues();
Object[] getValues();
}

View file

@ -5,19 +5,18 @@
package org.postgresql.core;
import org.postgresql.jdbc.EscapeSyntaxCallMode;
import org.postgresql.jdbc.EscapedFunctions2;
import org.postgresql.util.GT;
import org.postgresql.util.IntList;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.postgresql.jdbc.EscapeSyntaxCallMode;
import org.postgresql.jdbc.EscapedFunctions2;
import org.postgresql.util.GT;
import org.postgresql.util.IntList;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
/**
* Basic query parser infrastructure.
@ -28,6 +27,10 @@ import java.util.List;
*/
public class Parser {
private static final char[] QUOTE_OR_ALPHABETIC_MARKER = {'\"', '0'};
private static final char[] QUOTE_OR_ALPHABETIC_MARKER_OR_PARENTHESIS = {'\"', '0', '('};
private static final char[] SINGLE_QUOTE = {'\''};
public Parser() {
}
@ -414,7 +417,7 @@ public class Parser {
* @param list input list
* @return output array
*/
private static int [] toIntArray(IntList list) {
private static int[] toIntArray(IntList list) {
if (list == null) {
return null;
}
@ -628,7 +631,7 @@ public class Parser {
}
/**
Parse string to check presence of BEGIN keyword regardless of case.
* Parse string to check presence of BEGIN keyword regardless of case.
*
* @param query char[] of the query statement
* @param offset position of query to start checking
@ -647,7 +650,7 @@ public class Parser {
}
/**
Parse string to check presence of ATOMIC keyword regardless of case.
* Parse string to check presence of ATOMIC keyword regardless of case.
*
* @param query char[] of the query statement
* @param offset position of query to start checking
@ -861,6 +864,7 @@ public class Parser {
/**
* Returns true if a given string {@code s} has digit at position {@code pos}.
*
* @param s input string
* @param pos position (0-based)
* @return true if input string s has digit at position pos
@ -871,6 +875,7 @@ public class Parser {
/**
* Converts digit at position {@code pos} in string {@code s} to integer or throws.
*
* @param s input string
* @param pos position (0-based)
* @return integer value of a digit at position pos
@ -906,8 +911,7 @@ public class Parser {
* https://github.com/postgres/postgres/blob/f2c587067a8eb9cf1c8f009262381a6576ba3dd0/src/backend/utils/adt/arrayfuncs.c#L421-L438
* </p>
*
* @param c
* Character to examine.
* @param c Character to examine.
* @return Indication if the character is a whitespace which back end will
* escape.
*/
@ -1506,10 +1510,6 @@ public class Parser {
return i;
}
private static final char[] QUOTE_OR_ALPHABETIC_MARKER = {'\"', '0'};
private static final char[] QUOTE_OR_ALPHABETIC_MARKER_OR_PARENTHESIS = {'\"', '0', '('};
private static final char[] SINGLE_QUOTE = {'\''};
// Static variables for parsing SQL when replaceProcessing is true.
private enum SqlParseState {
IN_SQLCODE,

View file

@ -41,12 +41,14 @@ public interface Query {
/**
* Returns SQL in native for database format.
*
* @return SQL in native for database format
*/
String getNativeSql();
/**
* Returns properties of the query (sql keyword, and some other parsing info).
*
* @return returns properties of the query (sql keyword, and some other parsing info) or null if not applicable
*/
SqlCommand getSqlCommand();
@ -65,6 +67,7 @@ public interface Query {
/**
* Get the number of times this Query has been batched.
*
* @return number of times <code>addBatch()</code> has been called.
*/
int getBatchSize();
@ -83,5 +86,5 @@ public interface Query {
* @return an array of single-statement queries, or <code>null</code> if this object is already a
* single-statement query.
*/
Query [] getSubqueries();
Query[] getSubqueries();
}

View file

@ -6,15 +6,6 @@
package org.postgresql.core;
import org.postgresql.PGNotification;
import org.postgresql.copy.CopyOperation;
import org.postgresql.core.v3.TypeTransferModeRegistry;
import org.postgresql.jdbc.AutoSave;
import org.postgresql.jdbc.BatchResultHandler;
import org.postgresql.jdbc.EscapeSyntaxCallMode;
import org.postgresql.jdbc.PreferQueryMode;
import org.postgresql.util.HostSpec;
import java.io.Closeable;
import java.io.IOException;
import java.sql.SQLException;
@ -23,6 +14,14 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TimeZone;
import org.postgresql.PGNotification;
import org.postgresql.copy.CopyOperation;
import org.postgresql.core.v3.TypeTransferModeRegistry;
import org.postgresql.jdbc.AutoSave;
import org.postgresql.jdbc.BatchResultHandler;
import org.postgresql.jdbc.EscapeSyntaxCallMode;
import org.postgresql.jdbc.PreferQueryMode;
import org.postgresql.util.HostSpec;
/**
* <p>Abstracts the protocol-specific details of executing a query.</p>
@ -234,11 +233,11 @@ public interface QueryExecutor extends TypeTransferModeRegistry {
boolean isReWriteBatchedInsertsEnabled();
CachedQuery createQuery(String sql, boolean escapeProcessing, boolean isParameterized,
String ... columnNames)
String... columnNames)
throws SQLException;
Object createQueryKey(String sql, boolean escapeProcessing, boolean isParameterized,
String ... columnNames);
String... columnNames);
CachedQuery createQueryByKey(Object key) throws SQLException;
@ -248,12 +247,13 @@ public interface QueryExecutor extends TypeTransferModeRegistry {
CachedQuery borrowCallableQuery(String sql) throws SQLException;
CachedQuery borrowReturningQuery(String sql, String [] columnNames) throws SQLException;
CachedQuery borrowReturningQuery(String sql, String[] columnNames) throws SQLException;
void releaseQuery(CachedQuery cachedQuery);
/**
* Wrap given native query into a ready for execution format.
*
* @param queries list of queries in native to database syntax
* @return query object ready for execution by this query executor
*/
@ -314,7 +314,7 @@ public interface QueryExecutor extends TypeTransferModeRegistry {
* and results substitutes for a fast-path function call.
*/
@Deprecated
byte [] fastpathCall(int fnid, ParameterList params, boolean suppressBegin)
byte[] fastpathCall(int fnid, ParameterList params, boolean suppressBegin)
throws SQLException;
/**
@ -449,6 +449,7 @@ public interface QueryExecutor extends TypeTransferModeRegistry {
* The returned object should refer only the minimum subset of objects required
* for proper resource cleanup. For instance, it should better not hold a strong reference to
* {@link QueryExecutor}.
*
* @return action that would close the connection cleanly.
*/
Closeable getCloseAction();
@ -520,13 +521,13 @@ public interface QueryExecutor extends TypeTransferModeRegistry {
boolean getStandardConformingStrings();
/**
*
 * @return true if we are going to quote identifiers provided in the returning array; default is true
*/
boolean getQuoteReturningIdentifiers();
/**
* Returns backend timezone in java format.
*
* @return backend timezone in java format.
*/
TimeZone getTimeZone();
@ -538,6 +539,7 @@ public interface QueryExecutor extends TypeTransferModeRegistry {
/**
* Returns application_name connection property.
*
* @return application_name connection property
*/
String getApplicationName();
@ -570,10 +572,10 @@ public interface QueryExecutor extends TypeTransferModeRegistry {
*/
ReplicationProtocol getReplicationProtocol();
void setNetworkTimeout(int milliseconds) throws IOException;
int getNetworkTimeout() throws IOException;
void setNetworkTimeout(int milliseconds) throws IOException;
// Expose parameter status to PGConnection
Map<String, String> getParameterStatuses();

View file

@ -5,18 +5,6 @@
package org.postgresql.core;
import org.postgresql.PGNotification;
import org.postgresql.PGProperty;
import org.postgresql.jdbc.AutoSave;
import org.postgresql.jdbc.EscapeSyntaxCallMode;
import org.postgresql.jdbc.PreferQueryMode;
import org.postgresql.jdbc.ResourceLock;
import org.postgresql.util.HostSpec;
import org.postgresql.util.LruCache;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
import org.postgresql.util.ServerErrorMessage;
import java.io.Closeable;
import java.io.IOException;
import java.sql.SQLException;
@ -29,46 +17,51 @@ import java.util.TreeMap;
import java.util.concurrent.locks.Condition;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.postgresql.PGNotification;
import org.postgresql.PGProperty;
import org.postgresql.jdbc.AutoSave;
import org.postgresql.jdbc.EscapeSyntaxCallMode;
import org.postgresql.jdbc.PreferQueryMode;
import org.postgresql.jdbc.ResourceLock;
import org.postgresql.util.HostSpec;
import org.postgresql.util.LruCache;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
import org.postgresql.util.ServerErrorMessage;
@SuppressWarnings("try")
public abstract class QueryExecutorBase implements QueryExecutor {
private static final Logger LOGGER = Logger.getLogger(QueryExecutorBase.class.getName());
protected final PGStream pgStream;
protected final QueryExecutorCloseAction closeAction;
protected final boolean logServerErrorDetail;
protected final ResourceLock lock = new ResourceLock();
protected final Condition lockCondition = lock.newCondition();
private final String user;
private final String database;
private final int cancelSignalTimeout;
private int cancelPid;
private int cancelKey;
protected final QueryExecutorCloseAction closeAction;
private String serverVersion;
private int serverVersionNum;
private TransactionState transactionState = TransactionState.IDLE;
private final boolean reWriteBatchedInserts;
private final boolean columnSanitiserDisabled;
private final EscapeSyntaxCallMode escapeSyntaxCallMode;
private final boolean quoteReturningIdentifiers;
private final ArrayList<PGNotification> notifications = new ArrayList<>();
private final LruCache<Object, CachedQuery> statementCache;
private final CachedQueryCreateAction cachedQueryCreateAction;
// For getParameterStatuses(), GUC_REPORT tracking
private final TreeMap<String, String> parameterStatuses
= new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
private int cancelPid;
private int cancelKey;
private String serverVersion;
private int serverVersionNum;
private TransactionState transactionState = TransactionState.IDLE;
private PreferQueryMode preferQueryMode;
private AutoSave autoSave;
private boolean flushCacheOnDeallocate = true;
protected final boolean logServerErrorDetail;
// default value for server versions that don't report standard_conforming_strings
private boolean standardConformingStrings;
private SQLWarning warnings;
private final ArrayList<PGNotification> notifications = new ArrayList<>();
private final LruCache<Object, CachedQuery> statementCache;
private final CachedQueryCreateAction cachedQueryCreateAction;
// For getParameterStatuses(), GUC_REPORT tracking
private final TreeMap<String,String> parameterStatuses
= new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
protected final ResourceLock lock = new ResourceLock();
protected final Condition lockCondition = lock.newCondition();
@SuppressWarnings("this-escape")
protected QueryExecutorBase(PGStream pgStream, int cancelSignalTimeout, Properties info) throws SQLException {
@ -107,6 +100,7 @@ public abstract class QueryExecutorBase implements QueryExecutor {
/**
* Sends "terminate connection" message to the backend.
*
* @throws IOException in case connection termination fails
* @deprecated use {@link #getCloseAction()} instead
*/
@ -114,13 +108,13 @@ public abstract class QueryExecutorBase implements QueryExecutor {
protected abstract void sendCloseMessage() throws IOException;
@Override
public void setNetworkTimeout(int milliseconds) throws IOException {
pgStream.setNetworkTimeout(milliseconds);
public int getNetworkTimeout() throws IOException {
return pgStream.getNetworkTimeout();
}
@Override
public int getNetworkTimeout() throws IOException {
return pgStream.getNetworkTimeout();
public void setNetworkTimeout(int milliseconds) throws IOException {
pgStream.setNetworkTimeout(milliseconds);
}
@Override
@ -256,6 +250,10 @@ public abstract class QueryExecutorBase implements QueryExecutor {
return serverVersion;
}
public void setServerVersion(String serverVersion) {
this.serverVersion = serverVersion;
}
@SuppressWarnings("deprecation")
@Override
public int getServerVersionNum() {
@ -265,26 +263,10 @@ public abstract class QueryExecutorBase implements QueryExecutor {
return serverVersionNum = Utils.parseServerVersionStr(getServerVersion());
}
public void setServerVersion(String serverVersion) {
this.serverVersion = serverVersion;
}
public void setServerVersionNum(int serverVersionNum) {
this.serverVersionNum = serverVersionNum;
}
public void setTransactionState(TransactionState state) {
try (ResourceLock ignore = lock.obtain()) {
transactionState = state;
}
}
public void setStandardConformingStrings(boolean value) {
try (ResourceLock ignore = lock.obtain()) {
standardConformingStrings = value;
}
}
@Override
public boolean getStandardConformingStrings() {
try (ResourceLock ignore = lock.obtain()) {
@ -292,6 +274,12 @@ public abstract class QueryExecutorBase implements QueryExecutor {
}
}
public void setStandardConformingStrings(boolean value) {
try (ResourceLock ignore = lock.obtain()) {
standardConformingStrings = value;
}
}
@Override
public boolean getQuoteReturningIdentifiers() {
return quoteReturningIdentifiers;
@ -304,8 +292,10 @@ public abstract class QueryExecutorBase implements QueryExecutor {
}
}
public void setEncoding(Encoding encoding) throws IOException {
pgStream.setEncoding(encoding);
public void setTransactionState(TransactionState state) {
try (ResourceLock ignore = lock.obtain()) {
transactionState = state;
}
}
@Override
@ -313,6 +303,10 @@ public abstract class QueryExecutorBase implements QueryExecutor {
return pgStream.getEncoding();
}
public void setEncoding(Encoding encoding) throws IOException {
pgStream.setEncoding(encoding);
}
@Override
public boolean isReWriteBatchedInsertsEnabled() {
return this.reWriteBatchedInserts;
@ -329,7 +323,7 @@ public abstract class QueryExecutorBase implements QueryExecutor {
}
@Override
public final CachedQuery borrowReturningQuery(String sql, String [] columnNames)
public final CachedQuery borrowReturningQuery(String sql, String[] columnNames)
throws SQLException {
return statementCache.borrow(new QueryWithReturningColumnsKey(sql, true, true,
columnNames
@ -348,7 +342,7 @@ public abstract class QueryExecutorBase implements QueryExecutor {
@Override
public final Object createQueryKey(String sql, boolean escapeProcessing,
boolean isParameterized, String ... columnNames) {
boolean isParameterized, String... columnNames) {
Object key;
if (columnNames == null || columnNames.length != 0) {
// Null means "return whatever sensible columns are" (e.g. primary key, or serial, or something like that)
@ -369,7 +363,7 @@ public abstract class QueryExecutorBase implements QueryExecutor {
@Override
public final CachedQuery createQuery(String sql, boolean escapeProcessing,
boolean isParameterized, String ... columnNames)
boolean isParameterized, String... columnNames)
throws SQLException {
Object key = createQueryKey(sql, escapeProcessing, isParameterized, columnNames);
// Note: cache is not reused here for two reasons:

View file

@ -19,7 +19,7 @@ class QueryWithReturningColumnsKey extends BaseQueryKey {
private int size; // query length cannot exceed MAX_INT
QueryWithReturningColumnsKey(String sql, boolean isParameterized, boolean escapeProcessing,
String [] columnNames) {
String[] columnNames) {
super(sql, isParameterized, escapeProcessing);
if (columnNames == null) {
// TODO: teach parser to fetch key columns somehow when no column names were given
@ -37,7 +37,7 @@ class QueryWithReturningColumnsKey extends BaseQueryKey {
size = (int) super.getSize();
if (columnNames != null) {
size += 16; // array itself
for (String columnName: columnNames) {
for (String columnName : columnNames) {
size += columnName.length() * 2; // 2 bytes per char, revise with Java 9's compact strings
}
}

View file

@ -5,12 +5,11 @@
package org.postgresql.core;
import java.sql.SQLException;
import org.postgresql.replication.PGReplicationStream;
import org.postgresql.replication.fluent.logical.LogicalReplicationOptions;
import org.postgresql.replication.fluent.physical.PhysicalReplicationOptions;
import java.sql.SQLException;
/**
 * <p>Abstracts the protocol-specific details of physical and logical replication.</p>
*

View file

@ -83,12 +83,14 @@ public interface ResultHandler {
/**
* Returns the first encountered exception. The rest are chained via {@link SQLException#setNextException(SQLException)}
*
* @return the first encountered exception
*/
SQLException getException();
/**
* Returns the first encountered warning. The rest are chained via {@link SQLException#setNextException(SQLException)}
*
* @return the first encountered warning
*/
SQLWarning getWarning();

View file

@ -30,8 +30,7 @@ public enum ServerVersion implements Version {
v13("13"),
v14("14"),
v15("15"),
v16("16")
;
v16("16");
private final int version;
@ -39,16 +38,6 @@ public enum ServerVersion implements Version {
this.version = parseServerVersionStr(version);
}
/**
* Get a machine-readable version number.
*
* @return the version in numeric XXYYZZ form, e.g. 90401 for 9.4.1
*/
@Override
public int getVersionNum() {
return version;
}
/**
* <p>Attempt to parse the server version string into an XXYYZZ form version number into a
* {@link Version}.</p>
@ -182,4 +171,14 @@ public enum ServerVersion implements Version {
return 0; /* unknown */
}
/**
* Get a machine-readable version number.
*
* @return the version in numeric XXYYZZ form, e.g. 90401 for 9.4.1
*/
@Override
public int getVersionNum() {
return version;
}
}
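The XXYYZZ form mentioned in getVersionNum's javadoc packs major, minor, and patch into base-100 positions of a single integer. A tiny illustration of the numbering (not the driver's parser):
// XXYYZZ packing: major * 10000 + minor * 100 + patch.
static int versionNum(int major, int minor, int patch) {
    return major * 10000 + minor * 100 + patch;
}
// versionNum(9, 4, 1)  -> 90401   (the javadoc example)
// versionNum(16, 0, 0) -> 160000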

View file

@ -6,13 +6,12 @@
package org.postgresql.core;
import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.util.List;
import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
/**
* Poor man's Statement &amp; ResultSet, used for initial queries while we're still initializing the
@ -23,26 +22,6 @@ public class SetupQueryRunner {
public SetupQueryRunner() {
}
private static class SimpleResultHandler extends ResultHandlerBase {
private List<Tuple> tuples;
List<Tuple> getResults() {
return tuples;
}
@Override
public void handleResultRows(Query fromQuery, Field[] fields, List<Tuple> tuples,
ResultCursor cursor) {
this.tuples = tuples;
}
@Override
public void handleWarning(SQLWarning warning) {
// We ignore warnings. We assume we know what we're
// doing in the setup queries.
}
}
public static Tuple run(QueryExecutor executor, String queryString,
boolean wantResults) throws SQLException {
Query query = executor.createSimpleQuery(queryString);
@ -73,4 +52,24 @@ public class SetupQueryRunner {
return tuples.get(0);
}
private static class SimpleResultHandler extends ResultHandlerBase {
private List<Tuple> tuples;
List<Tuple> getResults() {
return tuples;
}
@Override
public void handleResultRows(Query fromQuery, Field[] fields, List<Tuple> tuples,
ResultCursor cursor) {
this.tuples = tuples;
}
@Override
public void handleWarning(SQLWarning warning) {
// We ignore warnings. We assume we know what we're
// doing in the setup queries.
}
}
}

View file

@ -5,6 +5,9 @@
package org.postgresql.core;
import java.util.Properties;
import javax.net.SocketFactory;
import javax.net.ssl.SSLSocketFactory;
import org.postgresql.PGProperty;
import org.postgresql.ssl.LibPQFactory;
import org.postgresql.util.GT;
@ -12,11 +15,6 @@ import org.postgresql.util.ObjectFactory;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
import java.util.Properties;
import javax.net.SocketFactory;
import javax.net.ssl.SSLSocketFactory;
/**
* Instantiates {@link SocketFactory} based on the {@link PGProperty#SOCKET_FACTORY}.
*/

View file

@ -14,10 +14,43 @@ import static org.postgresql.core.SqlCommandType.WITH;
*
* @author Jeremy Whiting jwhiting@redhat.com
* @author Christopher Deckers (chrriis@gmail.com)
*
*/
public class SqlCommand {
public static final SqlCommand BLANK = SqlCommand.createStatementTypeInfo(SqlCommandType.BLANK);
private final SqlCommandType commandType;
private final boolean parsedSQLhasRETURNINGKeyword;
private final int valuesBraceOpenPosition;
private final int valuesBraceClosePosition;
private SqlCommand(SqlCommandType type, boolean isBatchedReWriteConfigured,
int valuesBraceOpenPosition, int valuesBraceClosePosition, boolean isPresent,
int priorQueryCount) {
commandType = type;
parsedSQLhasRETURNINGKeyword = isPresent;
boolean batchedReWriteCompatible = (type == INSERT) && isBatchedReWriteConfigured
&& valuesBraceOpenPosition >= 0 && valuesBraceClosePosition > valuesBraceOpenPosition
&& !isPresent && priorQueryCount == 0;
this.valuesBraceOpenPosition = batchedReWriteCompatible ? valuesBraceOpenPosition : -1;
this.valuesBraceClosePosition = batchedReWriteCompatible ? valuesBraceClosePosition : -1;
}
public static SqlCommand createStatementTypeInfo(SqlCommandType type,
boolean isBatchedReWritePropertyConfigured,
int valuesBraceOpenPosition, int valuesBraceClosePosition, boolean isRETURNINGkeywordPresent,
int priorQueryCount) {
return new SqlCommand(type, isBatchedReWritePropertyConfigured,
valuesBraceOpenPosition, valuesBraceClosePosition, isRETURNINGkeywordPresent,
priorQueryCount);
}
public static SqlCommand createStatementTypeInfo(SqlCommandType type) {
return new SqlCommand(type, false, -1, -1, false, 0);
}
public static SqlCommand createStatementTypeInfo(SqlCommandType type,
boolean isRETURNINGkeywordPresent) {
return new SqlCommand(type, false, -1, -1, isRETURNINGkeywordPresent, 0);
}
public boolean isBatchedReWriteCompatible() {
return valuesBraceOpenPosition >= 0;
@ -43,39 +76,4 @@ public class SqlCommand {
return parsedSQLhasRETURNINGKeyword || commandType == SELECT || commandType == WITH;
}
public static SqlCommand createStatementTypeInfo(SqlCommandType type,
boolean isBatchedReWritePropertyConfigured,
int valuesBraceOpenPosition, int valuesBraceClosePosition, boolean isRETURNINGkeywordPresent,
int priorQueryCount) {
return new SqlCommand(type, isBatchedReWritePropertyConfigured,
valuesBraceOpenPosition, valuesBraceClosePosition, isRETURNINGkeywordPresent,
priorQueryCount);
}
public static SqlCommand createStatementTypeInfo(SqlCommandType type) {
return new SqlCommand(type, false, -1, -1, false, 0);
}
public static SqlCommand createStatementTypeInfo(SqlCommandType type,
boolean isRETURNINGkeywordPresent) {
return new SqlCommand(type, false, -1, -1, isRETURNINGkeywordPresent, 0);
}
private SqlCommand(SqlCommandType type, boolean isBatchedReWriteConfigured,
int valuesBraceOpenPosition, int valuesBraceClosePosition, boolean isPresent,
int priorQueryCount) {
commandType = type;
parsedSQLhasRETURNINGKeyword = isPresent;
boolean batchedReWriteCompatible = (type == INSERT) && isBatchedReWriteConfigured
&& valuesBraceOpenPosition >= 0 && valuesBraceClosePosition > valuesBraceOpenPosition
&& !isPresent && priorQueryCount == 0;
this.valuesBraceOpenPosition = batchedReWriteCompatible ? valuesBraceOpenPosition : -1;
this.valuesBraceClosePosition = batchedReWriteCompatible ? valuesBraceClosePosition : -1;
}
private final SqlCommandType commandType;
private final boolean parsedSQLhasRETURNINGKeyword;
private final int valuesBraceOpenPosition;
private final int valuesBraceClosePosition;
}
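Per the constructor above, isBatchedReWriteCompatible() can only return true for an INSERT whose VALUES(...) group was located by the parser, with batched rewriting enabled, no RETURNING keyword, and no statements preceding it in the same query. Some illustrative shapes (assumed SQL, not taken from the test suite):
// "INSERT INTO t (a, b) VALUES ($1, $2)"              -> compatible (VALUES group found)
// "INSERT INTO t (a, b) VALUES ($1, $2) RETURNING id" -> not compatible (RETURNING present)
// "UPDATE t SET a = $1"                               -> not compatible (not an INSERT)
// "SELECT 1; INSERT INTO t VALUES ($1)"               -> not compatible (prior statement in the query)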

View file

@ -7,8 +7,8 @@ package org.postgresql.core;
/**
* Type information inspection support.
* @author Jeremy Whiting jwhiting@redhat.com
*
* @author Jeremy Whiting jwhiting@redhat.com
*/
public enum SqlCommandType {

View file

@ -9,11 +9,12 @@ package org.postgresql.core;
* Class representing a row in a {@link java.sql.ResultSet}.
*/
public class Tuple {
final byte[][] data;
private final boolean forUpdate;
final byte[] [] data;
/**
* Construct an empty tuple. Used in updatable result sets.
*
* @param length the number of fields in the tuple.
*/
public Tuple(int length) {
@ -22,19 +23,21 @@ public class Tuple {
/**
* Construct a populated tuple. Used when returning results.
*
* @param data the tuple data
*/
public Tuple(byte[] [] data) {
public Tuple(byte[][] data) {
this(data, false);
}
private Tuple(byte[] [] data, boolean forUpdate) {
private Tuple(byte[][] data, boolean forUpdate) {
this.data = data;
this.forUpdate = forUpdate;
}
/**
* Number of fields in the tuple
*
* @return number of fields
*/
public int fieldCount() {
@ -43,6 +46,7 @@ public class Tuple {
/**
* Total length in bytes of the tuple data.
*
* @return the number of bytes in this tuple
*/
public int length() {
@ -57,15 +61,17 @@ public class Tuple {
/**
* Get the data for the given field
*
* @param index 0-based field position in the tuple
* @return byte array of the data
*/
public byte [] get(int index) {
public byte[] get(int index) {
return data[index];
}
/**
* Create a copy of the tuple for updating.
*
* @return a copy of the tuple that allows updates
*/
public Tuple updateableCopy() {
@ -74,6 +80,7 @@ public class Tuple {
/**
* Create a read-only copy of the tuple
*
* @return a copy of the tuple that does not allow updates
*/
public Tuple readOnlyCopy() {
@ -88,10 +95,11 @@ public class Tuple {
/**
* Set the given field to the given data.
*
* @param index 0-based field position
* @param fieldData the data to set
*/
public void set(int index, byte [] fieldData) {
public void set(int index, byte[] fieldData) {
if (!forUpdate) {
throw new IllegalArgumentException("Attempted to write to readonly tuple");
}

View file

@ -5,10 +5,9 @@
package org.postgresql.core;
import org.postgresql.util.PGobject;
import java.sql.SQLException;
import java.util.Iterator;
import org.postgresql.util.PGobject;
public interface TypeInfo {
void addCoreType(String pgTypeName, Integer oid, Integer sqlType, String javaClass,

View file

@ -6,13 +6,12 @@
package org.postgresql.core;
import java.io.IOException;
import java.sql.SQLException;
import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
import java.io.IOException;
import java.sql.SQLException;
/**
* Collection of utilities used by the protocol-level code.
*/

View file

@ -275,7 +275,7 @@ public class VisibleBufferedInputStream extends InputStream {
throw new IllegalArgumentException("n is too large");
}
if (avail >= n) {
index = index + (int)n;
index = index + (int) n;
return n;
}
n -= avail;
@ -347,7 +347,6 @@ public class VisibleBufferedInputStream extends InputStream {
}
/**
*
* @return the wrapped stream
*/
public InputStream getWrapped() {

View file

@ -5,14 +5,6 @@
package org.postgresql.core.v3;
import org.postgresql.PGProperty;
import org.postgresql.plugin.AuthenticationPlugin;
import org.postgresql.plugin.AuthenticationRequestType;
import org.postgresql.util.GT;
import org.postgresql.util.ObjectFactory;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.CharBuffer;
@ -21,15 +13,17 @@ import java.util.Arrays;
import java.util.Properties;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.postgresql.PGProperty;
import org.postgresql.plugin.AuthenticationPlugin;
import org.postgresql.plugin.AuthenticationRequestType;
import org.postgresql.util.GT;
import org.postgresql.util.ObjectFactory;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
class AuthenticationPluginManager {
private static final Logger LOGGER = Logger.getLogger(AuthenticationPluginManager.class.getName());
@FunctionalInterface
public interface PasswordAction<T, R> {
R apply(T password) throws PSQLException, IOException;
}
private AuthenticationPluginManager() {
}
@ -51,7 +45,7 @@ class AuthenticationPluginManager {
* @throws IOException Bubbles up any thrown IOException from the provided action
*/
public static <T> T withPassword(AuthenticationRequestType type, Properties info,
PasswordAction<char [], T> action) throws PSQLException, IOException {
PasswordAction<char[], T> action) throws PSQLException, IOException {
char[] password = null;
String authPluginClassName = PGProperty.AUTHENTICATION_PLUGIN_CLASS_NAME.getOrDefault(info);
@ -94,7 +88,7 @@ class AuthenticationPluginManager {
* encoded password. After completion, for security reasons the {@code byte[]} array will be
* wiped by filling it with zeroes. Callers must not rely on being able to read
* the password {@code byte[]} after the callback has completed.</p>
*
* @param type The authentication type that is being requested
* @param info The connection properties for the connection
* @param action The action to invoke with the encoded password
@ -122,4 +116,9 @@ class AuthenticationPluginManager {
Arrays.fill(encodedPassword, (byte) 0);
}
}
@FunctionalInterface
public interface PasswordAction<T, R> {
R apply(T password) throws PSQLException, IOException;
}
}
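withPassword/withEncodedPassword above hand the secret to a callback and then zero the array, so callers must copy whatever they need before returning. The core wipe-after-use pattern as a minimal generic sketch (hypothetical helper; the driver's own PasswordAction additionally allows the checked PSQLException/IOException that java.util.function.Function cannot declare):
import java.util.Arrays;
import java.util.function.Function;
final class Secrets {
    // The secret is only valid for the duration of the callback;
    // the array is zeroed in finally even if the action throws.
    static <T> T withSecret(byte[] secret, Function<byte[], T> action) {
        try {
            return action.apply(secret);
        } finally {
            Arrays.fill(secret, (byte) 0);
        }
    }
}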

View file

@ -16,15 +16,14 @@ import org.postgresql.core.ParameterList;
*
* @author Jeremy Whiting jwhiting@redhat.com
* @author Christopher Deckers (chrriis@gmail.com)
*
*/
public class BatchedQuery extends SimpleQuery {
private String sql;
private final int valuesBraceOpenPosition;
private final int valuesBraceClosePosition;
private final int batchSize;
private BatchedQuery [] blocks;
private String sql;
private BatchedQuery[] blocks;
public BatchedQuery(NativeQuery query, TypeTransferModeRegistry transferModeRegistry,
int valuesBraceOpenPosition,

View file

@ -6,15 +6,14 @@
package org.postgresql.core.v3;
import java.io.InputStream;
import java.sql.SQLException;
import org.postgresql.core.ParameterList;
import org.postgresql.util.ByteStreamWriter;
import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
import java.io.InputStream;
import java.sql.SQLException;
/**
* Parameter list for V3 query strings that contain multiple statements. We delegate to one
* SimpleParameterList per statement, and translate parameter indexes as needed.
@ -22,6 +21,10 @@ import java.sql.SQLException;
* @author Oliver Jowett (oliver@opencloud.com)
*/
class CompositeParameterList implements V3ParameterList {
private final int total;
private final SimpleParameterList[] subparams;
private final int[] offsets;
CompositeParameterList(SimpleParameterList[] subparams, int[] offsets) {
this.subparams = subparams;
this.offsets = offsets;
@ -166,7 +169,7 @@ class CompositeParameterList implements V3ParameterList {
}
@Override
public SimpleParameterList [] getSubparams() {
public SimpleParameterList[] getSubparams() {
return subparams;
}
@ -178,22 +181,22 @@ class CompositeParameterList implements V3ParameterList {
}
@Override
public byte [][] getEncoding() {
public byte[][] getEncoding() {
return null; // unsupported
}
@Override
public byte [] getFlags() {
public byte[] getFlags() {
return null; // unsupported
}
@Override
public int [] getParamTypes() {
public int[] getParamTypes() {
return null; // unsupported
}
@Override
public Object [] getValues() {
public Object[] getValues() {
return null; // unsupported
}
@ -208,8 +211,4 @@ class CompositeParameterList implements V3ParameterList {
subparam.convertFunctionOutParameters();
}
}
private final int total;
private final SimpleParameterList[] subparams;
private final int[] offsets;
}

View file

@ -6,12 +6,11 @@
package org.postgresql.core.v3;
import java.util.Map;
import org.postgresql.core.ParameterList;
import org.postgresql.core.Query;
import org.postgresql.core.SqlCommand;
import java.util.Map;
/**
* V3 Query implementation for queries that involve multiple statements. We split it up into one
* SimpleQuery per statement, and wrap the corresponding per-statement SimpleParameterList objects
@ -20,6 +19,9 @@ import java.util.Map;
* @author Oliver Jowett (oliver@opencloud.com)
*/
class CompositeQuery implements Query {
private final SimpleQuery[] subqueries;
private final int[] offsets;
CompositeQuery(SimpleQuery[] subqueries, int[] offsets) {
this.subqueries = subqueries;
this.offsets = offsets;
@ -105,7 +107,4 @@ class CompositeQuery implements Query {
public Map<String, Integer> getResultSetColumnNameIndexMap() {
return null; // unsupported
}
private final SimpleQuery[] subqueries;
private final int[] offsets;
}

View file

@ -6,6 +6,22 @@
package org.postgresql.core.v3;
import java.io.IOException;
import java.net.ConnectException;
import java.nio.charset.StandardCharsets;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.TimeZone;
import java.util.logging.Level;
import java.util.logging.LogRecord;
import java.util.logging.Logger;
import javax.net.SocketFactory;
import org.postgresql.PGProperty;
import org.postgresql.core.ConnectionFactory;
import org.postgresql.core.PGStream;
@ -35,24 +51,6 @@ import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
import org.postgresql.util.ServerErrorMessage;
import java.io.IOException;
import java.net.ConnectException;
import java.nio.charset.StandardCharsets;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.TimeZone;
import java.util.logging.Level;
import java.util.logging.LogRecord;
import java.util.logging.Logger;
import javax.net.SocketFactory;
/**
* ConnectionFactory implementation for version 3 (7.4+) connections.
*
@ -60,29 +58,6 @@ import javax.net.SocketFactory;
*/
public class ConnectionFactoryImpl extends ConnectionFactory {
private static class StartupParam {
private final String key;
private final String value;
StartupParam(String key, String value) {
this.key = key;
this.value = value;
}
@Override
public String toString() {
return this.key + "=" + this.value;
}
public byte[] getEncodedKey() {
return this.key.getBytes(StandardCharsets.UTF_8);
}
public byte[] getEncodedValue() {
return this.value.getBytes(StandardCharsets.UTF_8);
}
}
private static final Logger LOGGER = Logger.getLogger(ConnectionFactoryImpl.class.getName());
private static final int AUTH_REQ_OK = 0;
private static final int AUTH_REQ_KRB4 = 1;
@ -97,12 +72,54 @@ public class ConnectionFactoryImpl extends ConnectionFactory {
private static final int AUTH_REQ_SASL = 10;
private static final int AUTH_REQ_SASL_CONTINUE = 11;
private static final int AUTH_REQ_SASL_FINAL = 12;
private static final String IN_HOT_STANDBY = "in_hot_standby";
public ConnectionFactoryImpl() {
}
private static void log(Level level, String msg, Throwable thrown, Object... params) {
if (!LOGGER.isLoggable(level)) {
return;
}
LogRecord rec = new LogRecord(level, msg);
// Set the loggerName of the LogRecord with the current logger
rec.setLoggerName(LOGGER.getName());
rec.setParameters(params);
rec.setThrown(thrown);
LOGGER.log(rec);
}
/**
 * Convert a Java time zone to a postgres time zone. All others stay the same except that GMT+nn
 * changes to GMT-nn and vice versa.
 * If you provide GMT+/-nn, postgres uses POSIX rules, which have a positive sign for west of Greenwich;
 * Java uses ISO rules, where the positive sign is east of Greenwich.
 * To make matters more interesting, postgres will always report in ISO.
*
* @return The current JVM time zone in postgresql format.
*/
private static String createPostgresTimeZone() {
String tz = TimeZone.getDefault().getID();
if (tz.length() <= 3 || !tz.startsWith("GMT")) {
return tz;
}
char sign = tz.charAt(3);
String start;
switch (sign) {
case '+':
start = "GMT-";
break;
case '-':
start = "GMT+";
break;
default:
// unknown type
return tz;
}
return start + tz.substring(4);
}
private PGStream tryConnect(Properties info, SocketFactory socketFactory, HostSpec hostSpec,
SslMode sslMode, GSSEncMode gssEncMode)
throws SQLException, IOException {
@ -395,59 +412,16 @@ public class ConnectionFactoryImpl extends ConnectionFactory {
return paramList;
}
private static void log(Level level, String msg, Throwable thrown, Object... params) {
if (!LOGGER.isLoggable(level)) {
return;
}
LogRecord rec = new LogRecord(level, msg);
// Set the loggerName of the LogRecord with the current logger
rec.setLoggerName(LOGGER.getName());
rec.setParameters(params);
rec.setThrown(thrown);
LOGGER.log(rec);
}
/**
 * Convert a Java time zone to a postgres time zone. All others stay the same except that GMT+nn
 * changes to GMT-nn and vice versa.
 * If you provide GMT+/-nn, postgres uses POSIX rules, which have a positive sign for west of Greenwich;
 * Java uses ISO rules, where the positive sign is east of Greenwich.
 * To make matters more interesting, postgres will always report in ISO.
*
* @return The current JVM time zone in postgresql format.
*/
private static String createPostgresTimeZone() {
String tz = TimeZone.getDefault().getID();
if (tz.length() <= 3 || !tz.startsWith("GMT")) {
return tz;
}
char sign = tz.charAt(3);
String start;
switch (sign) {
case '+':
start = "GMT-";
break;
case '-':
start = "GMT+";
break;
default:
// unknown type
return tz;
}
return start + tz.substring(4);
}
@SuppressWarnings("fallthrough")
private PGStream enableGSSEncrypted(PGStream pgStream, GSSEncMode gssEncMode, String host, Properties info,
int connectTimeout)
throws IOException, PSQLException {
if ( gssEncMode == GSSEncMode.DISABLE ) {
if (gssEncMode == GSSEncMode.DISABLE) {
return pgStream;
}
if (gssEncMode == GSSEncMode.ALLOW ) {
if (gssEncMode == GSSEncMode.ALLOW) {
// start with plain text and let the server request it
return pgStream;
}
@ -892,7 +866,6 @@ public class ConnectionFactoryImpl extends ConnectionFactory {
* @see <a href="https://www.postgresql.org/docs/current/hot-standby.html">Hot standby documentation</a>
* @see <a href="https://www.postgresql.org/message-id/flat/1700970.cRWpxnom9y@hammer.magicstack.net">in_hot_standby patch thread v10</a>
* @see <a href="https://www.postgresql.org/message-id/flat/CAF3%2BxM%2B8-ztOkaV9gHiJ3wfgENTq97QcjXQt%2BrbFQ6F7oNzt9A%40mail.gmail.com">in_hot_standby patch thread v14</a>
*
*/
private boolean isPrimary(QueryExecutor queryExecutor) throws SQLException, IOException {
String inHotStandby = queryExecutor.getParameterStatus(IN_HOT_STANDBY);
@ -904,4 +877,27 @@ public class ConnectionFactoryImpl extends ConnectionFactory {
String queriedTransactionReadonly = queryExecutor.getEncoding().decode(nonNullResults.get(0));
return "off".equalsIgnoreCase(queriedTransactionReadonly);
}
private static class StartupParam {
private final String key;
private final String value;
StartupParam(String key, String value) {
this.key = key;
this.value = value;
}
@Override
public String toString() {
return this.key + "=" + this.value;
}
public byte[] getEncodedKey() {
return this.key.getBytes(StandardCharsets.UTF_8);
}
public byte[] getEncodedValue() {
return this.value.getBytes(StandardCharsets.UTF_8);
}
}
}
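For reference, the sign flip implemented by createPostgresTimeZone above maps Java's ISO-style GMT offsets onto the POSIX interpretation postgres applies to bare GMT offsets; a few worked examples, assuming the method exactly as shown:
// Java TimeZone ID  ->  postgres form returned by the method
// "GMT+05:30"       ->  "GMT-05:30"      (ISO: '+' means east; POSIX: '+' means west)
// "GMT-08:00"       ->  "GMT+08:00"
// "Europe/Berlin"   ->  "Europe/Berlin"  (region IDs pass through unchanged)
// "GMT"             ->  "GMT"            (length <= 3, returned unchanged)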

View file

@ -5,13 +5,12 @@
package org.postgresql.core.v3;
import org.postgresql.copy.CopyDual;
import org.postgresql.util.ByteStreamWriter;
import org.postgresql.util.PSQLException;
import java.sql.SQLException;
import java.util.ArrayDeque;
import java.util.Queue;
import org.postgresql.copy.CopyDual;
import org.postgresql.util.ByteStreamWriter;
import org.postgresql.util.PSQLException;
public class CopyDualImpl extends CopyOperationImpl implements CopyDual {
private final Queue<byte[]> received = new ArrayDeque<>();
@ -40,12 +39,12 @@ public class CopyDualImpl extends CopyOperationImpl implements CopyDual {
}
@Override
public byte [] readFromCopy() throws SQLException {
public byte[] readFromCopy() throws SQLException {
return readFromCopy(true);
}
@Override
public byte [] readFromCopy(boolean block) throws SQLException {
public byte[] readFromCopy(boolean block) throws SQLException {
if (received.isEmpty()) {
getQueryExecutor().readFromCopy(this, block);
}

View file

@ -5,19 +5,18 @@
package org.postgresql.core.v3;
import java.sql.SQLException;
import org.postgresql.copy.CopyIn;
import org.postgresql.util.ByteStreamWriter;
import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
import java.sql.SQLException;
/**
* <p>COPY FROM STDIN operation.</p>
*
* <p>Anticipated flow:
*
* <p>
* CopyManager.copyIn() -&gt;QueryExecutor.startCopy() - sends given query to server
* -&gt;processCopyResults(): - receives CopyInResponse from Server - creates new CopyInImpl
* -&gt;initCopy(): - receives copy metadata from server -&gt;CopyInImpl.init() -&gt;lock()
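For orientation, a hedged end-to-end usage sketch of the flow this javadoc describes (connection URL, credentials, table and file names are placeholders, not part of this change):

import java.io.FileReader;
import java.io.Reader;
import java.sql.Connection;
import java.sql.DriverManager;
import org.postgresql.copy.CopyManager;
import org.postgresql.core.BaseConnection;

public class CopyInSketch {
    public static void main(String[] args) throws Exception {
        try (Connection con = DriverManager.getConnection(
                "jdbc:postgresql://localhost:5432/test", "test", "test");
             Reader data = new FileReader("rows.csv")) {
            // copyIn() sends the COPY statement; the server answers with
            // CopyInResponse, the driver builds a CopyInImpl, and the Reader
            // is streamed to the backend as COPY data.
            CopyManager copyManager = new CopyManager((BaseConnection) con);
            long rows = copyManager.copyIn("COPY target_table FROM STDIN WITH (FORMAT csv)", data);
            System.out.println("rows copied: " + rows);
        }
    }
}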

View file

@ -5,17 +5,16 @@
package org.postgresql.core.v3;
import java.sql.SQLException;
import org.postgresql.copy.CopyOperation;
import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
import java.sql.SQLException;
public abstract class CopyOperationImpl implements CopyOperation {
QueryExecutorImpl queryExecutor;
int rowFormat;
int [] fieldFormats;
int[] fieldFormats;
long handledRowCount = -1;
public CopyOperationImpl() {

View file

@ -5,9 +5,8 @@
package org.postgresql.core.v3;
import org.postgresql.copy.CopyOut;
import java.sql.SQLException;
import org.postgresql.copy.CopyOut;
/**
* <p>Anticipated flow of a COPY TO STDOUT operation:</p>
@ -24,18 +23,18 @@ import java.sql.SQLException;
* &lt;-returned: byte array of data received from server or null at end.</p>
*/
public class CopyOutImpl extends CopyOperationImpl implements CopyOut {
private byte [] currentDataRow;
private byte[] currentDataRow;
public CopyOutImpl() {
}
@Override
public byte [] readFromCopy() throws SQLException {
public byte[] readFromCopy() throws SQLException {
return readFromCopy(true);
}
@Override
public byte [] readFromCopy(boolean block) throws SQLException {
public byte[] readFromCopy(boolean block) throws SQLException {
currentDataRow = null;
getQueryExecutor().readFromCopy(this, block);
return currentDataRow;

View file

@ -7,7 +7,6 @@ package org.postgresql.core.v3;
/**
* Information for "pending describe queue".
*
*/
class DescribeRequest {
public final SimpleQuery query;

View file

@ -7,7 +7,6 @@ package org.postgresql.core.v3;
/**
* Information for "pending execute queue".
*
*/
class ExecuteRequest {
public final SimpleQuery query;

View file

@ -6,10 +6,9 @@
package org.postgresql.core.v3;
import org.postgresql.core.ResultCursor;
import java.lang.ref.PhantomReference;
import java.nio.charset.StandardCharsets;
import org.postgresql.core.ResultCursor;
/**
* V3 ResultCursor implementation in terms of backend Portals. This holds the state of a single
@ -18,6 +17,11 @@ import java.nio.charset.StandardCharsets;
* @author Oliver Jowett (oliver@opencloud.com)
*/
class Portal implements ResultCursor {
private final SimpleQuery query;
private final String portalName;
private final byte[] encodedName;
private PhantomReference<?> cleanupRef;
Portal(SimpleQuery query, String portalName) {
this.query = query;
this.portalName = portalName;
@ -38,6 +42,12 @@ class Portal implements ResultCursor {
return portalName;
}
// Holding on to a reference to the generating query has
// the nice side-effect that while this Portal is referenced,
// so is the SimpleQuery, so the underlying statement won't
// be closed while the portal is open (the backend closes
// all open portals when the statement is closed)
byte[] getEncodedPortalName() {
return encodedName;
}
@ -54,15 +64,4 @@ class Portal implements ResultCursor {
public String toString() {
return portalName;
}
// Holding on to a reference to the generating query has
// the nice side-effect that while this Portal is referenced,
// so is the SimpleQuery, so the underlying statement won't
// be closed while the portal is open (the backend closes
// all open portals when the statement is closed)
private final SimpleQuery query;
private final String portalName;
private final byte[] encodedName;
private PhantomReference<?> cleanupRef;
}

View file

@ -6,6 +6,32 @@
package org.postgresql.core.v3;
import java.io.IOException;
import java.lang.ref.PhantomReference;
import java.lang.ref.Reference;
import java.lang.ref.ReferenceQueue;
import java.net.Socket;
import java.net.SocketException;
import java.net.SocketTimeoutException;
import java.nio.charset.StandardCharsets;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Deque;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Objects;
import java.util.Properties;
import java.util.Set;
import java.util.TimeZone;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.postgresql.PGProperty;
import org.postgresql.copy.CopyIn;
import org.postgresql.copy.CopyOperation;
@ -46,33 +72,6 @@ import org.postgresql.util.PSQLState;
import org.postgresql.util.PSQLWarning;
import org.postgresql.util.ServerErrorMessage;
import java.io.IOException;
import java.lang.ref.PhantomReference;
import java.lang.ref.Reference;
import java.lang.ref.ReferenceQueue;
import java.net.Socket;
import java.net.SocketException;
import java.net.SocketTimeoutException;
import java.nio.charset.StandardCharsets;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Deque;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Objects;
import java.util.Properties;
import java.util.Set;
import java.util.TimeZone;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* QueryExecutor implementation for the V3 protocol.
*/
@ -82,6 +81,56 @@ public class QueryExecutorImpl extends QueryExecutorBase {
private static final Logger LOGGER = Logger.getLogger(QueryExecutorImpl.class.getName());
private static final Field[] NO_FIELDS = new Field[0];
// Deadlock avoidance:
//
// It's possible for the send and receive streams to get "deadlocked" against each other since
// we do not have a separate thread. The scenario is this: we have two streams:
//
// driver -> TCP buffering -> server
// server -> TCP buffering -> driver
//
// The server behaviour is roughly:
// while true:
// read message
// execute message
// write results
//
// If the server -> driver stream has a full buffer, the write will block.
// If the driver is still writing when this happens, and the driver -> server
// stream also fills up, we deadlock: the driver is blocked on write() waiting
// for the server to read some more data, and the server is blocked on write()
// waiting for the driver to read some more data.
//
// To avoid this, we guess at how much response data we can request from the
// server before the server -> driver stream's buffer is full (MAX_BUFFERED_RECV_BYTES).
// This is the point where the server blocks on write and stops reading data. If we
// reach this point, we force a Sync message and read pending data from the server
// until ReadyForQuery, then go back to writing more queries unless we saw an error.
//
// This is not 100% reliable -- it's only done in the batch-query case and only
// at a reasonably high level (per query, not per message), and it's only an estimate
// -- so it might break. To do it correctly in all cases would seem to require a
// separate send or receive thread as we can only do the Sync-and-read-results
// operation at particular points, and also as we don't really know how much data
// the server is sending.
//
// Our message size estimation is coarse, and disregards asynchronous
// notifications, warnings/info/debug messages, etc, so the response size may be
// quite different from the 250 bytes assumed here even for queries that don't
// return data.
//
// See github issues #194 and #195.
//
// Assume 64k server->client buffering, which is extremely conservative. A typical
// system will have 200kb or more of buffers for its receive buffers, and the sending
// system will typically have the same on the send side, giving us 400kb or so to work
// with. (We could check Java's receive buffer size, but prefer to assume a very
// conservative buffer instead, and we don't know how big the server's send
// buffer is.)
//
private static final int MAX_BUFFERED_RECV_BYTES = 64000;
private static final int NODATA_QUERY_RESPONSE_SIZE_BYTES = 250;
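As a rough illustration of the arithmetic above, a minimal self-contained sketch of the guard (the class and method names are invented; only the two constants come from the code above):

final class DeadlockAvoidanceSketch {

    private static final int MAX_BUFFERED_RECV_BYTES = 64000;
    private static final int NODATA_QUERY_RESPONSE_SIZE_BYTES = 250;

    private int estimatedReceiveBufferBytes;

    // Returns true when the caller should send Sync and read results up to
    // ReadyForQuery before queueing any more queries.
    boolean mustSyncBeforeNextQuery(int estimatedResponseSize) {
        estimatedReceiveBufferBytes += estimatedResponseSize;
        return estimatedReceiveBufferBytes >= MAX_BUFFERED_RECV_BYTES;
    }

    // Called once ReadyForQuery has been consumed and the estimate can be reset.
    void onSyncCompleted() {
        estimatedReceiveBufferBytes = 0;
    }

    public static void main(String[] args) {
        DeadlockAvoidanceSketch guard = new DeadlockAvoidanceSketch();
        int queriesBeforeSync = 0;
        while (!guard.mustSyncBeforeNextQuery(NODATA_QUERY_RESPONSE_SIZE_BYTES)) {
            queriesBeforeSync++;
        }
        // 64000 / 250 = 256, so roughly 255 no-data queries fit before a forced Sync.
        System.out.println("queries buffered before forced Sync: " + queriesBeforeSync);
        guard.onSyncCompleted();
    }
}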
private static final Portal UNNAMED_PORTAL = new Portal(null, "unnamed");
static {
//canonicalize commonly seen strings to reduce memory and speed comparisons
@ -101,21 +150,6 @@ public class QueryExecutorImpl extends QueryExecutorBase {
Encoding.canonicalize("in_hot_standby");
}
/**
* TimeZone of the current connection (TimeZone backend parameter).
*/
private TimeZone timeZone;
/**
* application_name connection property.
*/
private String applicationName;
/**
* True if server uses integers for date and time fields. False if server uses double.
*/
private boolean integerDateTimes;
/**
* Bit set that has a bit set for each oid which should be received using binary format.
*/
@ -131,29 +165,114 @@ public class QueryExecutorImpl extends QueryExecutorBase {
* from Sync messages vs from simple execute (aka 'Q').
*/
private final SimpleQuery sync;
private final ReplicationProtocol replicationProtocol;
/**
* {@code CommandComplete(B)} messages are quite common, so we reuse an instance to parse those
*/
private final CommandCompleteParser commandCompleteParser = new CommandCompleteParser();
private final AdaptiveFetchCache adaptiveFetchCache;
private final HashMap<PhantomReference<SimpleQuery>, String> parsedQueryMap =
new HashMap<>();
private final ReferenceQueue<SimpleQuery> parsedQueryCleanupQueue =
new ReferenceQueue<>();
private final HashMap<PhantomReference<Portal>, String> openPortalMap =
new HashMap<>();
private final ReferenceQueue<Portal> openPortalCleanupQueue = new ReferenceQueue<>();
private final Deque<SimpleQuery> pendingParseQueue = new ArrayDeque<>();
private final Deque<Portal> pendingBindQueue = new ArrayDeque<>();
private final Deque<ExecuteRequest> pendingExecuteQueue = new ArrayDeque<>();
private final Deque<DescribeRequest> pendingDescribeStatementQueue =
new ArrayDeque<>();
private final Deque<SimpleQuery> pendingDescribePortalQueue = new ArrayDeque<>();
private final boolean allowEncodingChanges;
private final boolean cleanupSavePoints;
//
// Query parsing
//
private final SimpleQuery beginTransactionQuery =
new SimpleQuery(
new NativeQuery("BEGIN", null, false, SqlCommand.BLANK),
null, false);
private final SimpleQuery beginReadOnlyTransactionQuery =
new SimpleQuery(
new NativeQuery("BEGIN READ ONLY", null, false, SqlCommand.BLANK),
null, false);
//
// Query execution
//
private final SimpleQuery emptyQuery =
new SimpleQuery(
new NativeQuery("", null, false,
SqlCommand.createStatementTypeInfo(SqlCommandType.BLANK)
), null, false);
private final SimpleQuery autoSaveQuery =
new SimpleQuery(
new NativeQuery("SAVEPOINT PGJDBC_AUTOSAVE", null, false, SqlCommand.BLANK),
null, false);
private final SimpleQuery releaseAutoSave =
new SimpleQuery(
new NativeQuery("RELEASE SAVEPOINT PGJDBC_AUTOSAVE", null, false, SqlCommand.BLANK),
null, false);
/*
In autosave mode we use this query to roll back errored transactions
*/
private final SimpleQuery restoreToAutoSave =
new SimpleQuery(
new NativeQuery("ROLLBACK TO SAVEPOINT PGJDBC_AUTOSAVE", null, false, SqlCommand.BLANK),
null, false);
AtomicBoolean processingCopyResults = new AtomicBoolean(false);
/**
* TimeZone of the current connection (TimeZone backend parameter).
*/
private TimeZone timeZone;
/**
* application_name connection property.
*/
private String applicationName;
/**
* True if server uses integers for date and time fields. False if server uses double.
*/
private boolean integerDateTimes;
private short deallocateEpoch;
/**
* This caches the latest observed {@code set search_path} query so the reset of prepared
* statement cache can be skipped if using repeated calls for the same {@code set search_path}
* value.
*/
private String lastSetSearchPathQuery;
/**
* The exception that caused the last transaction to fail.
*/
private SQLException transactionFailCause;
private final ReplicationProtocol replicationProtocol;
//
// Fastpath
//
/**
* {@code CommandComplete(B)} messages are quite common, so we reuse an instance to parse those
* <p>Supplement to synchronization of public methods on current QueryExecutor.</p>
*
* <p>Necessary for keeping the connection intact between calls to public methods sharing a state
* such as COPY subprotocol. waitOnLock() must be called at beginning of each connection access
* point.</p>
*
* <p>Public methods sharing that state must then be synchronized among themselves. Normal method
* synchronization typically suffices for that.</p>
*
* <p>See notes on related methods as well as currentCopy() below.</p>
*/
private final CommandCompleteParser commandCompleteParser = new CommandCompleteParser();
private final AdaptiveFetchCache adaptiveFetchCache;
private Object lockedFor;
private long nextUniqueID = 1;
/**
* <p>The estimated server response size since we last consumed the input stream from the server, in
* bytes.</p>
*
* <p>Starts at zero, reset by every Sync message. Mainly used for batches.</p>
*
* <p>Used to avoid deadlocks, see MAX_BUFFERED_RECV_BYTES.</p>
*/
private int estimatedReceiveBufferBytes;
@SuppressWarnings("this-escape")
public QueryExecutorImpl(PGStream pgStream,
@ -177,20 +296,6 @@ public class QueryExecutorImpl extends QueryExecutorBase {
return 3;
}
/**
* <p>Supplement to synchronization of public methods on current QueryExecutor.</p>
*
* <p>Necessary for keeping the connection intact between calls to public methods sharing a state
* such as COPY subprotocol. waitOnLock() must be called at beginning of each connection access
* point.</p>
*
* <p>Public methods sharing that state must then be synchronized among themselves. Normal method
* synchronization typically suffices for that.</p>
*
* <p>See notes on related methods as well as currentCopy() below.</p>
*/
private Object lockedFor;
/**
* Obtain lock over this connection for given object, blocking to wait if necessary.
*
@ -239,6 +344,10 @@ public class QueryExecutorImpl extends QueryExecutorBase {
}
}
//
// Copy subprotocol implementation
//
/**
* @param holder object assumed to hold the lock
* @return whether given object actually holds the lock
@ -257,10 +366,6 @@ public class QueryExecutorImpl extends QueryExecutorBase {
return lockedFor == holder;
}
//
// Query parsing
//
@Override
public Query createSimpleQuery(String sql) throws SQLException {
List<NativeQuery> queries = Parser.parseJdbcSql(sql,
@ -304,10 +409,6 @@ public class QueryExecutorImpl extends QueryExecutorBase {
return new CompositeQuery(subqueries, offsets);
}
//
// Query execution
//
private int updateQueryMode(int flags) {
switch (getPreferQueryMode()) {
case SIMPLE:
@ -436,7 +537,7 @@ public class QueryExecutorImpl extends QueryExecutorBase {
}
private void releaseSavePoint(boolean autosave, int flags) throws SQLException {
if ( autosave
if (autosave
&& getAutoSave() == AutoSave.ALWAYS
&& getTransactionState() == TransactionState.OPEN) {
try {
@ -466,56 +567,6 @@ public class QueryExecutorImpl extends QueryExecutorBase {
throw e;
}
// Deadlock avoidance:
//
// It's possible for the send and receive streams to get "deadlocked" against each other since
// we do not have a separate thread. The scenario is this: we have two streams:
//
// driver -> TCP buffering -> server
// server -> TCP buffering -> driver
//
// The server behaviour is roughly:
// while true:
// read message
// execute message
// write results
//
// If the server -> driver stream has a full buffer, the write will block.
// If the driver is still writing when this happens, and the driver -> server
// stream also fills up, we deadlock: the driver is blocked on write() waiting
// for the server to read some more data, and the server is blocked on write()
// waiting for the driver to read some more data.
//
// To avoid this, we guess at how much response data we can request from the
// server before the server -> driver stream's buffer is full (MAX_BUFFERED_RECV_BYTES).
// This is the point where the server blocks on write and stops reading data. If we
// reach this point, we force a Sync message and read pending data from the server
// until ReadyForQuery, then go back to writing more queries unless we saw an error.
//
// This is not 100% reliable -- it's only done in the batch-query case and only
// at a reasonably high level (per query, not per message), and it's only an estimate
// -- so it might break. To do it correctly in all cases would seem to require a
// separate send or receive thread as we can only do the Sync-and-read-results
// operation at particular points, and also as we don't really know how much data
// the server is sending.
//
// Our message size estimation is coarse, and disregards asynchronous
// notifications, warnings/info/debug messages, etc, so the response size may be
// quite different from the 250 bytes assumed here even for queries that don't
// return data.
//
// See github issues #194 and #195.
//
// Assume 64k server->client buffering, which is extremely conservative. A typical
// system will have 200kb or more of buffers for its receive buffers, and the sending
// system will typically have the same on the send side, giving us 400kb or so to work
// with. (We could check Java's receive buffer size, but prefer to assume a very
// conservative buffer instead, and we don't know how big the server's send
// buffer is.)
//
private static final int MAX_BUFFERED_RECV_BYTES = 64000;
private static final int NODATA_QUERY_RESPONSE_SIZE_BYTES = 250;
@Override
public void execute(Query[] queries, ParameterList[] parameterLists,
BatchResultHandler batchHandler, int maxRows, int fetchSize, int flags) throws SQLException {
@ -594,6 +645,10 @@ public class QueryExecutorImpl extends QueryExecutorBase {
}
}
//
// Message sending
//
private ResultHandler sendQueryPreamble(final ResultHandler delegateHandler, int flags)
throws IOException {
// First, send CloseStatements for finalized SimpleQueries that had statement names assigned.
@ -646,13 +701,9 @@ public class QueryExecutorImpl extends QueryExecutorBase {
};
}
//
// Fastpath
//
@Override
@SuppressWarnings("deprecation")
public byte [] fastpathCall(int fnid, ParameterList parameters,
public byte[] fastpathCall(int fnid, ParameterList parameters,
boolean suppressBegin)
throws SQLException {
try (ResourceLock ignore = lock.obtain()) {
@ -831,7 +882,7 @@ public class QueryExecutorImpl extends QueryExecutorBase {
addWarning(warning);
if (useTimeout) {
long newTimeMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
timeoutMillis = timeoutMillis + (int)(startTime - newTimeMillis); // Overflows after 49 days, ignore that
timeoutMillis = timeoutMillis + (int) (startTime - newTimeMillis); // Overflows after 49 days, ignore that
startTime = newTimeMillis;
if (timeoutMillis == 0) {
timeoutMillis = -1; // Don't accidentally wait forever
@ -868,7 +919,7 @@ public class QueryExecutorImpl extends QueryExecutorBase {
}
}
private byte [] receiveFastpathResult() throws IOException, SQLException {
private byte[] receiveFastpathResult() throws IOException, SQLException {
boolean endQuery = false;
SQLException error = null;
byte[] returnValue = null;
@ -943,10 +994,6 @@ public class QueryExecutorImpl extends QueryExecutorBase {
return returnValue;
}
//
// Copy subprotocol implementation
//
/**
* Sends given query to BE to start, initialize and lock connection for a CopyOperation.
*
@ -1006,6 +1053,30 @@ public class QueryExecutorImpl extends QueryExecutorBase {
}
}
//
// Garbage collection of parsed statements.
//
// When a statement is successfully parsed, registerParsedQuery is called.
// This creates a PhantomReference referring to the "owner" of the statement
// (the originating Query object) and inserts that reference as a key in
// parsedQueryMap. The values of parsedQueryMap are the corresponding allocated
// statement names. The originating Query object also holds a reference to the
// PhantomReference.
//
// When the owning Query object is closed, it enqueues and clears the associated
// PhantomReference.
//
// If the owning Query object becomes unreachable (see java.lang.ref javadoc) before
// being closed, the corresponding PhantomReference is enqueued on
// parsedQueryCleanupQueue. In the Sun JVM, phantom references are only enqueued
// when a GC occurs, so this is not necessarily prompt but should eventually happen.
//
// Periodically (currently, just before query execution), the parsedQueryCleanupQueue
// is polled. For each enqueued PhantomReference we find, we remove the corresponding
// entry from parsedQueryMap, obtaining the name of the underlying statement in the
// process. Then we send a message to the backend to deallocate that statement.
//
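A generic, self-contained sketch of the phantom-reference pattern this comment describes (class, field and method names below are invented; only the mechanism mirrors registerParsedQuery and the cleanup queue):

import java.lang.ref.PhantomReference;
import java.lang.ref.Reference;
import java.lang.ref.ReferenceQueue;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Consumer;

final class PhantomCleanupSketch<O> {

    private final Map<PhantomReference<O>, String> registered = new HashMap<>();
    private final ReferenceQueue<O> queue = new ReferenceQueue<>();

    // Equivalent of registerParsedQuery: remember which server-side statement
    // name belongs to which owner object.
    void register(O owner, String statementName) {
        registered.put(new PhantomReference<>(owner, queue), statementName);
    }

    // Called periodically (in the driver: just before query execution); every
    // reference the GC has enqueued maps back to a statement we can deallocate.
    void processPendingCleanup(Consumer<String> deallocate) {
        Reference<? extends O> ref;
        while ((ref = queue.poll()) != null) {
            String statementName = registered.remove(ref);
            if (statementName != null) {
                deallocate.accept(statementName); // e.g. send Close(statement) to the backend
            }
            ref.clear();
        }
    }
}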
/**
* Finishes a copy operation and unlocks connection discarding any exchanged data.
*
@ -1171,6 +1242,15 @@ public class QueryExecutorImpl extends QueryExecutorBase {
}
}
//
// Essentially the same strategy is used for the cleanup of portals.
// Note that each Portal holds a reference to the corresponding Query
// that generated it, so the Query won't be collected (and the statement
// closed) until all the Portals are, too. This is required by the mechanics
// of the backend protocol: when a statement is closed, all dependent portals
// are also closed.
//
public void flushCopy(CopyOperationImpl op) throws SQLException {
try (ResourceLock ignore = lock.obtain()) {
if (!hasLock(op)) {
@ -1211,8 +1291,6 @@ public class QueryExecutorImpl extends QueryExecutorBase {
}
}
AtomicBoolean processingCopyResults = new AtomicBoolean(false);
/**
* Handles copy sub protocol responses from server. Unlocks at end of sub protocol, so operations
* on pgStream or QueryExecutor are not allowed in a method after calling this!
@ -1559,10 +1637,6 @@ public class QueryExecutorImpl extends QueryExecutorBase {
}
}
//
// Message sending
//
private void sendSync() throws IOException {
LOGGER.log(Level.FINEST, " FE=> Sync");
@ -2053,35 +2127,6 @@ public class QueryExecutorImpl extends QueryExecutorBase {
pendingDescribePortalQueue.add(query);
}
//
// Garbage collection of parsed statements.
//
// When a statement is successfully parsed, registerParsedQuery is called.
// This creates a PhantomReference referring to the "owner" of the statement
// (the originating Query object) and inserts that reference as a key in
// parsedQueryMap. The values of parsedQueryMap are the corresponding allocated
// statement names. The originating Query object also holds a reference to the
// PhantomReference.
//
// When the owning Query object is closed, it enqueues and clears the associated
// PhantomReference.
//
// If the owning Query object becomes unreachable (see java.lang.ref javadoc) before
// being closed, the corresponding PhantomReference is enqueued on
// parsedQueryCleanupQueue. In the Sun JVM, phantom references are only enqueued
// when a GC occurs, so this is not necessarily prompt but should eventually happen.
//
// Periodically (currently, just before query execution), the parsedQueryCleanupQueue
// is polled. For each enqueued PhantomReference we find, we remove the corresponding
// entry from parsedQueryMap, obtaining the name of the underlying statement in the
// process. Then we send a message to the backend to deallocate that statement.
//
private final HashMap<PhantomReference<SimpleQuery>, String> parsedQueryMap =
new HashMap<>();
private final ReferenceQueue<SimpleQuery> parsedQueryCleanupQueue =
new ReferenceQueue<>();
private void registerParsedQuery(SimpleQuery query, String statementName) {
if (statementName == null) {
return;
@ -2102,21 +2147,6 @@ public class QueryExecutorImpl extends QueryExecutorBase {
}
}
//
// Essentially the same strategy is used for the cleanup of portals.
// Note that each Portal holds a reference to the corresponding Query
// that generated it, so the Query won't be collected (and the statement
// closed) until all the Portals are, too. This is required by the mechanics
// of the backend protocol: when a statement is closed, all dependent portals
// are also closed.
//
private final HashMap<PhantomReference<Portal>, String> openPortalMap =
new HashMap<>();
private final ReferenceQueue<Portal> openPortalCleanupQueue = new ReferenceQueue<>();
private static final Portal UNNAMED_PORTAL = new Portal(null, "unnamed");
private void registerOpenPortal(Portal portal) {
if (portal == UNNAMED_PORTAL) {
return; // Using the unnamed portal.
@ -2630,13 +2660,13 @@ public class QueryExecutorImpl extends QueryExecutorBase {
}
@Override
public void setAdaptiveFetch(boolean adaptiveFetch) {
this.adaptiveFetchCache.setAdaptiveFetch(adaptiveFetch);
public boolean getAdaptiveFetch() {
return this.adaptiveFetchCache.getAdaptiveFetch();
}
@Override
public boolean getAdaptiveFetch() {
return this.adaptiveFetchCache.getAdaptiveFetch();
public void setAdaptiveFetch(boolean adaptiveFetch) {
this.adaptiveFetchCache.setAdaptiveFetch(adaptiveFetch);
}
@Override
@ -2934,17 +2964,13 @@ public class QueryExecutorImpl extends QueryExecutorBase {
}
}
public void setTimeZone(TimeZone timeZone) {
this.timeZone = timeZone;
}
@Override
public TimeZone getTimeZone() {
return timeZone;
}
public void setApplicationName(String applicationName) {
this.applicationName = applicationName;
public void setTimeZone(TimeZone timeZone) {
this.timeZone = timeZone;
}
@Override
@ -2955,6 +2981,10 @@ public class QueryExecutorImpl extends QueryExecutorBase {
return applicationName;
}
public void setApplicationName(String applicationName) {
this.applicationName = applicationName;
}
@Override
public ReplicationProtocol getReplicationProtocol() {
return replicationProtocol;
@ -2983,13 +3013,6 @@ public class QueryExecutorImpl extends QueryExecutorBase {
}
}
@Override
public boolean useBinaryForReceive(int oid) {
synchronized (useBinaryReceiveForOids) {
return useBinaryReceiveForOids.contains(oid);
}
}
@Override
public void setBinaryReceiveOids(Set<Integer> oids) {
synchronized (useBinaryReceiveForOids) {
@ -2998,6 +3021,13 @@ public class QueryExecutorImpl extends QueryExecutorBase {
}
}
@Override
public boolean useBinaryForReceive(int oid) {
synchronized (useBinaryReceiveForOids) {
return useBinaryReceiveForOids.contains(oid);
}
}
@Override
public void addBinarySendOid(int oid) {
synchronized (useBinarySendForOids) {
@ -3021,13 +3051,6 @@ public class QueryExecutorImpl extends QueryExecutorBase {
}
}
@Override
public boolean useBinaryForSend(int oid) {
synchronized (useBinarySendForOids) {
return useBinarySendForOids.contains(oid);
}
}
@Override
public void setBinarySendOids(Set<Integer> oids) {
synchronized (useBinarySendForOids) {
@ -3036,8 +3059,11 @@ public class QueryExecutorImpl extends QueryExecutorBase {
}
}
private void setIntegerDateTimes(boolean state) {
integerDateTimes = state;
@Override
public boolean useBinaryForSend(int oid) {
synchronized (useBinarySendForOids) {
return useBinarySendForOids.contains(oid);
}
}
@Override
@ -3045,58 +3071,7 @@ public class QueryExecutorImpl extends QueryExecutorBase {
return integerDateTimes;
}
private final Deque<SimpleQuery> pendingParseQueue = new ArrayDeque<>();
private final Deque<Portal> pendingBindQueue = new ArrayDeque<>();
private final Deque<ExecuteRequest> pendingExecuteQueue = new ArrayDeque<>();
private final Deque<DescribeRequest> pendingDescribeStatementQueue =
new ArrayDeque<>();
private final Deque<SimpleQuery> pendingDescribePortalQueue = new ArrayDeque<>();
private long nextUniqueID = 1;
private final boolean allowEncodingChanges;
private final boolean cleanupSavePoints;
/**
* <p>The estimated server response size since we last consumed the input stream from the server, in
* bytes.</p>
*
* <p>Starts at zero, reset by every Sync message. Mainly used for batches.</p>
*
* <p>Used to avoid deadlocks, see MAX_BUFFERED_RECV_BYTES.</p>
*/
private int estimatedReceiveBufferBytes;
private final SimpleQuery beginTransactionQuery =
new SimpleQuery(
new NativeQuery("BEGIN", null, false, SqlCommand.BLANK),
null, false);
private final SimpleQuery beginReadOnlyTransactionQuery =
new SimpleQuery(
new NativeQuery("BEGIN READ ONLY", null, false, SqlCommand.BLANK),
null, false);
private final SimpleQuery emptyQuery =
new SimpleQuery(
new NativeQuery("", null, false,
SqlCommand.createStatementTypeInfo(SqlCommandType.BLANK)
), null, false);
private final SimpleQuery autoSaveQuery =
new SimpleQuery(
new NativeQuery("SAVEPOINT PGJDBC_AUTOSAVE", null, false, SqlCommand.BLANK),
null, false);
private final SimpleQuery releaseAutoSave =
new SimpleQuery(
new NativeQuery("RELEASE SAVEPOINT PGJDBC_AUTOSAVE", null, false, SqlCommand.BLANK),
null, false);
/*
In autosave mode we use this query to roll back errored transactions
*/
private final SimpleQuery restoreToAutoSave =
new SimpleQuery(
new NativeQuery("ROLLBACK TO SAVEPOINT PGJDBC_AUTOSAVE", null, false, SqlCommand.BLANK),
null, false);
private void setIntegerDateTimes(boolean state) {
integerDateTimes = state;
}
}

View file

@ -6,6 +6,11 @@
package org.postgresql.core.v3;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.sql.SQLException;
import java.util.Arrays;
import org.postgresql.core.Oid;
import org.postgresql.core.PGStream;
import org.postgresql.core.ParameterList;
@ -20,18 +25,12 @@ import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
import org.postgresql.util.StreamWrapper;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.sql.SQLException;
import java.util.Arrays;
/**
* Parameter list for a single-statement V3 query.
*
* @author Oliver Jowett (oliver@opencloud.com)
*/
class SimpleParameterList implements V3ParameterList {
public class SimpleParameterList implements V3ParameterList {
private static final byte IN = 1;
private static final byte OUT = 2;
@ -39,8 +38,19 @@ class SimpleParameterList implements V3ParameterList {
private static final byte TEXT = 0;
private static final byte BINARY = 4;
/**
* Marker object representing NULL; this distinguishes "parameter never set" from "parameter set
* to null".
*/
private static final Object NULL_OBJECT = new Object();
private final Object[] paramValues;
private final int[] paramTypes;
private final byte[] flags;
private final byte[][] encoded;
private final TypeTransferModeRegistry transferModeRegistry;
private int pos;
SimpleParameterList(int paramCount, TypeTransferModeRegistry transferModeRegistry) {
public SimpleParameterList(int paramCount, TypeTransferModeRegistry transferModeRegistry) {
this.paramValues = new Object[paramCount];
this.paramTypes = new int[paramCount];
this.encoded = new byte[paramCount][];
@ -48,6 +58,72 @@ class SimpleParameterList implements V3ParameterList {
this.transferModeRegistry = transferModeRegistry;
}
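A minimal sketch of the sentinel idea behind the NULL_OBJECT marker declared above (the class below is invented for illustration): a dedicated marker object lets the list tell "never set" apart from "explicitly set to SQL NULL".

final class NullSentinelSketch {

    private static final Object NULL_MARKER = new Object();
    private final Object[] values;

    NullSentinelSketch(int paramCount) {
        this.values = new Object[paramCount];
    }

    void set(int index, Object value) {
        values[index - 1] = value;
    }

    void setNull(int index) {
        values[index - 1] = NULL_MARKER; // explicitly bound to SQL NULL
    }

    // A slot that is still null was never bound at all, which is an error at
    // execute time; a slot holding NULL_MARKER is a legitimate NULL parameter.
    void checkAllParametersSet() {
        for (int i = 0; i < values.length; i++) {
            if (values[i] == null) {
                throw new IllegalStateException("parameter " + (i + 1) + " was never set");
            }
        }
    }
}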
/**
* <p>Escapes a given text value as a literal, wraps it in single quotes, casts it to the
* given data type, and finally wraps the whole thing in parentheses.</p>
*
* <p>For example, "123" and "int4" becomes "('123'::int4)"</p>
*
* <p>The additional parentheses are added to ensure that the surrounding text where the
* parameter value is entered does not modify the interpretation of the value.</p>
*
* <p>For example if our input SQL is: <code>SELECT ?b</code></p>
*
* <p>Using a parameter value of '{}' and type of json we'd get:</p>
*
* <pre>
* test=# SELECT ('{}'::json)b;
* b
* ----
* {}
* </pre>
*
* <p>But without the parentheses the result changes:</p>
*
* <pre>
* test=# SELECT '{}'::jsonb;
* jsonb
* -------
* {}
* </pre>
**/
private static String quoteAndCast(String text, String type, boolean standardConformingStrings) {
StringBuilder sb = new StringBuilder((text.length() + 10) / 10 * 11); // Add 10% for escaping.
sb.append("('");
try {
Utils.escapeLiteral(sb, text, standardConformingStrings);
} catch (SQLException e) {
// This should only happen if we have an embedded null
// and there's not much we can do if we do hit one.
//
// To force a server side failure, we deliberately include
// a zero byte character in the literal to force the server
// to reject the command.
sb.append('\u0000');
}
sb.append("'");
if (type != null) {
sb.append("::");
sb.append(type);
}
sb.append(")");
return sb.toString();
}
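A hedged, self-contained sketch of the resulting text (quoteAndCast is private, so the shape of the output is re-implemented here with a simplified escaper that only doubles single quotes; the real code escapes through Utils.escapeLiteral):

final class QuoteAndCastSketch {

    // Simplified stand-in for the private quoteAndCast above; assumes
    // standard_conforming_strings is on, so only quote-doubling is needed.
    static String quoteAndCast(String text, String type) {
        String escaped = text.replace("'", "''");
        String literal = "('" + escaped + "'";
        return type == null ? literal + ")" : literal + "::" + type + ")";
    }

    public static void main(String[] args) {
        System.out.println(quoteAndCast("123", "int4"));  // ('123'::int4)
        System.out.println(quoteAndCast("it's", "text")); // ('it''s'::text)
        System.out.println(quoteAndCast("{}", "json"));   // ('{}'::json)
    }
}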
private static void streamBytea(PGStream pgStream, StreamWrapper wrapper) throws IOException {
byte[] rawData = wrapper.getBytes();
if (rawData != null) {
pgStream.send(rawData, wrapper.getOffset(), wrapper.getLength());
return;
}
pgStream.sendStream(wrapper.getStream(), wrapper.getLength());
}
private static void streamBytea(PGStream pgStream, ByteStreamWriter writer) throws IOException {
pgStream.send(writer);
}
@Override
public void registerOutParameter(int index, int sqlType) throws SQLException {
if (index < 1 || index > paramValues.length) {
@ -140,11 +216,19 @@ class SimpleParameterList implements V3ParameterList {
bind(index, value, oid, BINARY);
}
//
// bytea helper
//
@Override
public void setBytea(int index, byte[] data, int offset, int length) throws SQLException {
bind(index, new StreamWrapper(data, offset, length), Oid.BYTEA, BINARY);
}
//
// byte stream writer support
//
@Override
public void setBytea(int index, InputStream stream, int length) throws SQLException {
bind(index, new StreamWrapper(stream, length), Oid.BYTEA, BINARY);
@ -155,6 +239,10 @@ class SimpleParameterList implements V3ParameterList {
bind(index, new StreamWrapper(stream), Oid.BYTEA, BINARY);
}
//
// Package-private V3 accessors
//
@Override
public void setBytea(int index, ByteStreamWriter writer) throws SQLException {
bind(index, writer, Oid.BYTEA, BINARY);
@ -176,58 +264,6 @@ class SimpleParameterList implements V3ParameterList {
bind(index, NULL_OBJECT, oid, binaryTransfer);
}
/**
* <p>Escapes a given text value as a literal, wraps it in single quotes, casts it to the
* given data type, and finally wraps the whole thing in parentheses.</p>
*
* <p>For example, "123" and "int4" becomes "('123'::int4)"</p>
*
* <p>The additional parentheses are added to ensure that the surrounding text where the
* parameter value is entered does not modify the interpretation of the value.</p>
*
* <p>For example if our input SQL is: <code>SELECT ?b</code></p>
*
* <p>Using a parameter value of '{}' and type of json we'd get:</p>
*
* <pre>
* test=# SELECT ('{}'::json)b;
* b
* ----
* {}
* </pre>
*
* <p>But without the parentheses the result changes:</p>
*
* <pre>
* test=# SELECT '{}'::jsonb;
* jsonb
* -------
* {}
* </pre>
**/
private static String quoteAndCast(String text, String type, boolean standardConformingStrings) {
StringBuilder sb = new StringBuilder((text.length() + 10) / 10 * 11); // Add 10% for escaping.
sb.append("('");
try {
Utils.escapeLiteral(sb, text, standardConformingStrings);
} catch (SQLException e) {
// This should only happen if we have an embedded null
// and there's not much we can do if we do hit one.
//
// To force a server side failure, we deliberately include
// a zero byte character in the literal to force the server
// to reject the command.
sb.append('\u0000');
}
sb.append("'");
if (type != null) {
sb.append("::");
sb.append(type);
}
sb.append(")");
return sb.toString();
}
@Override
public String toString(int index, boolean standardConformingStrings) {
--index;
@ -389,37 +425,11 @@ class SimpleParameterList implements V3ParameterList {
}
}
//
// bytea helper
//
private static void streamBytea(PGStream pgStream, StreamWrapper wrapper) throws IOException {
byte[] rawData = wrapper.getBytes();
if (rawData != null) {
pgStream.send(rawData, wrapper.getOffset(), wrapper.getLength());
return;
}
pgStream.sendStream(wrapper.getStream(), wrapper.getLength());
}
//
// byte stream writer support
//
private static void streamBytea(PGStream pgStream, ByteStreamWriter writer) throws IOException {
pgStream.send(writer);
}
@Override
public int[] getTypeOIDs() {
return paramTypes;
}
//
// Package-private V3 accessors
//
int getTypeOID(int index) {
return paramTypes[index - 1];
}
@ -545,7 +555,7 @@ class SimpleParameterList implements V3ParameterList {
}
@Override
public SimpleParameterList [] getSubparams() {
public SimpleParameterList[] getSubparams() {
return null;
}
@ -565,13 +575,13 @@ class SimpleParameterList implements V3ParameterList {
}
@Override
public byte[] [] getEncoding() {
public byte[][] getEncoding() {
return encoded;
}
@Override
public void appendAll(ParameterList list) throws SQLException {
if (list instanceof SimpleParameterList ) {
if (list instanceof SimpleParameterList) {
/* only v3.SimpleParameterList is compatible with this type
we need to create copies of our parameters, otherwise the values can be changed */
SimpleParameterList spl = (SimpleParameterList) list;
@ -592,6 +602,7 @@ class SimpleParameterList implements V3ParameterList {
/**
* Useful implementation of toString.
*
* @return String representation of the list values
*/
@Override
@ -606,18 +617,4 @@ class SimpleParameterList implements V3ParameterList {
ts.append("]>");
return ts.toString();
}
private final Object[] paramValues;
private final int[] paramTypes;
private final byte[] flags;
private final byte[] [] encoded;
private final TypeTransferModeRegistry transferModeRegistry;
/**
* Marker object representing NULL; this distinguishes "parameter never set" from "parameter set
* to null".
*/
private static final Object NULL_OBJECT = new Object();
private int pos;
}

View file

@ -6,6 +6,12 @@
package org.postgresql.core.v3;
import java.lang.ref.PhantomReference;
import java.nio.charset.StandardCharsets;
import java.util.BitSet;
import java.util.Map;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.postgresql.core.Field;
import org.postgresql.core.NativeQuery;
import org.postgresql.core.Oid;
@ -14,13 +20,6 @@ import org.postgresql.core.Query;
import org.postgresql.core.SqlCommand;
import org.postgresql.jdbc.PgResultSet;
import java.lang.ref.PhantomReference;
import java.nio.charset.StandardCharsets;
import java.util.BitSet;
import java.util.Map;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* V3 Query implementation for a single-statement query. This also holds the state of any associated
* server-side named statement. We use a PhantomReference managed by the QueryExecutor to handle
@ -29,7 +28,32 @@ import java.util.logging.Logger;
* @author Oliver Jowett (oliver@opencloud.com)
*/
class SimpleQuery implements Query {
static final SimpleParameterList NO_PARAMETERS = new SimpleParameterList(0, null);
private static final Logger LOGGER = Logger.getLogger(SimpleQuery.class.getName());
private final NativeQuery nativeQuery;
private final TypeTransferModeRegistry transferModeRegistry;
private final boolean sanitiserDisabled;
private Map<String, Integer> resultSetColumnNameIndexMap;
private String statementName;
private byte[] encodedStatementName;
/**
* The stored fields from previous execution or describe of a prepared statement. Always null for
* non-prepared statements.
*/
private Field[] fields;
//
// Implementation guts
//
private boolean needUpdateFieldFormats;
private boolean hasBinaryFields;
private boolean portalDescribed;
private boolean statementDescribed;
private PhantomReference<?> cleanupRef;
private int[] preparedTypes;
private BitSet unspecifiedParams;
private short deallocateEpoch;
private Integer cachedMaxResultRowSize;
SimpleQuery(SimpleQuery src) {
this(src.nativeQuery, src.transferModeRegistry, src.sanitiserDisabled);
@ -67,7 +91,7 @@ class SimpleQuery implements Query {
}
@Override
public SimpleQuery [] getSubqueries() {
public SimpleQuery[] getSubqueries() {
return null;
}
@ -108,10 +132,6 @@ class SimpleQuery implements Query {
return maxResultRowSize;
}
//
// Implementation guts
//
@Override
public String getNativeSql() {
return nativeQuery.nativeSql;
@ -124,6 +144,10 @@ class SimpleQuery implements Query {
this.deallocateEpoch = deallocateEpoch;
}
int[] getPrepareTypes() {
return preparedTypes;
}
void setPrepareTypes(int[] paramTypes) {
// Remember which parameters were unspecified since the parameters will be overridden later by
// ParameterDescription message
@ -146,10 +170,6 @@ class SimpleQuery implements Query {
System.arraycopy(paramTypes, 0, this.preparedTypes, 0, paramTypes.length);
}
int [] getPrepareTypes() {
return preparedTypes;
}
String getStatementName() {
return statementName;
}
@ -210,33 +230,33 @@ class SimpleQuery implements Query {
return this.unspecifiedParams != null && !this.unspecifiedParams.isEmpty();
}
byte [] getEncodedStatementName() {
byte[] getEncodedStatementName() {
return encodedStatementName;
}
/**
* Sets the fields that this query will return.
*
* @param fields The fields that this query will return.
*/
void setFields(Field [] fields) {
this.fields = fields;
this.resultSetColumnNameIndexMap = null;
this.cachedMaxResultRowSize = null;
this.needUpdateFieldFormats = fields != null;
this.hasBinaryFields = false; // just in case
}
/**
* Returns the fields that this query will return. If the result set fields are not known returns
* null.
*
* @return the fields that this query will return.
*/
Field [] getFields() {
Field[] getFields() {
return fields;
}
/**
* Sets the fields that this query will return.
*
* @param fields The fields that this query will return.
*/
void setFields(Field[] fields) {
this.fields = fields;
this.resultSetColumnNameIndexMap = null;
this.cachedMaxResultRowSize = null;
this.needUpdateFieldFormats = fields != null;
this.hasBinaryFields = false; // just in case
}
/**
* Returns true if the current query needs its field formats adjusted as per connection configuration.
* Subsequent invocations would return {@code false}. The idea is to perform adjustments only
@ -334,8 +354,6 @@ class SimpleQuery implements Query {
return nativeQuery.bindPositions.length * getBatchSize();
}
private Map<String, Integer> resultSetColumnNameIndexMap;
@Override
public Map<String, Integer> getResultSetColumnNameIndexMap() {
Map<String, Integer> columnPositions = this.resultSetColumnNameIndexMap;
@ -354,28 +372,4 @@ class SimpleQuery implements Query {
public SqlCommand getSqlCommand() {
return nativeQuery.getCommand();
}
private final NativeQuery nativeQuery;
private final TypeTransferModeRegistry transferModeRegistry;
private String statementName;
private byte [] encodedStatementName;
/**
* The stored fields from previous execution or describe of a prepared statement. Always null for
* non-prepared statements.
*/
private Field [] fields;
private boolean needUpdateFieldFormats;
private boolean hasBinaryFields;
private boolean portalDescribed;
private boolean statementDescribed;
private final boolean sanitiserDisabled;
private PhantomReference<?> cleanupRef;
private int [] preparedTypes;
private BitSet unspecifiedParams;
private short deallocateEpoch;
private Integer cachedMaxResultRowSize;
static final SimpleParameterList NO_PARAMETERS = new SimpleParameterList(0, null);
}

View file

@ -8,6 +8,7 @@ package org.postgresql.core.v3;
public interface TypeTransferModeRegistry {
/**
* Returns whether the given oid should be sent in binary format.
*
* @param oid type oid
* @return true if given oid should be sent in binary format
*/
@ -15,6 +16,7 @@ public interface TypeTransferModeRegistry {
/**
* Returns whether the given oid should be received in binary format.
*
* @param oid type oid
* @return true if given oid should be received in binary format
*/

View file

@ -6,9 +6,8 @@
package org.postgresql.core.v3;
import org.postgresql.core.ParameterList;
import java.sql.SQLException;
import org.postgresql.core.ParameterList;
/**
* Common interface for all V3 parameter list implementations.
@ -38,23 +37,26 @@ interface V3ParameterList extends ParameterList {
* @return an array of single-statement parameter lists, or <code>null</code> if this object is
* already a single-statement parameter list.
*/
SimpleParameterList [] getSubparams();
SimpleParameterList[] getSubparams();
/**
* Return the parameter type information.
*
* @return an array of {@link org.postgresql.core.Oid} type information
*/
int [] getParamTypes();
int[] getParamTypes();
/**
* Return the flags for each parameter.
*
* @return an array of bytes used to store flags.
*/
byte [] getFlags();
byte[] getFlags();
/**
* Return the encoding for each parameter.
*
* @return nested byte array of bytes with encoding information.
*/
byte [] [] getEncoding();
byte[][] getEncoding();
}

View file

@ -5,13 +5,12 @@
package org.postgresql.core.v3.adaptivefetch;
import org.postgresql.PGProperty;
import org.postgresql.core.Query;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.postgresql.PGProperty;
import org.postgresql.core.Query;
/**
* The main purpose of this class is to handle the adaptive fetching process. Adaptive fetching is used
@ -26,8 +25,8 @@ import java.util.Properties;
public class AdaptiveFetchCache {
private final Map<String, AdaptiveFetchCacheEntry> adaptiveFetchInfoMap;
private boolean adaptiveFetch;
private final int minimumAdaptiveFetchSize;
private boolean adaptiveFetch;
private int maximumAdaptiveFetchSize = -1;
private long maximumResultBufferSize = -1;

View file

@ -5,14 +5,6 @@
package org.postgresql.core.v3.replication;
import org.postgresql.copy.CopyDual;
import org.postgresql.replication.LogSequenceNumber;
import org.postgresql.replication.PGReplicationStream;
import org.postgresql.replication.ReplicationType;
import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
import java.net.SocketTimeoutException;
import java.nio.ByteBuffer;
import java.sql.SQLException;
@ -20,11 +12,18 @@ import java.util.Date;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.postgresql.copy.CopyDual;
import org.postgresql.replication.LogSequenceNumber;
import org.postgresql.replication.PGReplicationStream;
import org.postgresql.replication.ReplicationType;
import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
public class V3PGReplicationStream implements PGReplicationStream {
private static final Logger LOGGER = Logger.getLogger(V3PGReplicationStream.class.getName());
public static final long POSTGRES_EPOCH_2000_01_01 = 946684800000L;
private static final Logger LOGGER = Logger.getLogger(V3PGReplicationStream.class.getName());
private static final long NANOS_PER_MILLISECOND = 1000000L;
private final CopyDual copyDual;
@ -176,7 +175,7 @@ public class V3PGReplicationStream implements PGReplicationStream {
private boolean isTimeUpdate() {
/* a value of 0 disables automatic updates */
if ( updateInterval == 0 ) {
if (updateInterval == 0) {
return false;
}
long diff = System.nanoTime() - lastStatusUpdate;

View file

@ -5,6 +5,11 @@
package org.postgresql.core.v3.replication;
import java.io.IOException;
import java.sql.SQLException;
import java.util.Properties;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.postgresql.copy.CopyDual;
import org.postgresql.core.PGStream;
import org.postgresql.core.QueryExecutor;
@ -18,12 +23,6 @@ import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
import java.io.IOException;
import java.sql.SQLException;
import java.util.Properties;
import java.util.logging.Level;
import java.util.logging.Logger;
public class V3ReplicationProtocol implements ReplicationProtocol {
private static final Logger LOGGER = Logger.getLogger(V3ReplicationProtocol.class.getName());

View file

@ -5,17 +5,15 @@
package org.postgresql.ds;
import org.postgresql.ds.common.BaseDataSource;
import org.postgresql.util.DriverInfo;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.sql.SQLException;
import javax.sql.ConnectionPoolDataSource;
import javax.sql.PooledConnection;
import org.postgresql.ds.common.BaseDataSource;
import org.postgresql.util.DriverInfo;
/**
* PostgreSQL implementation of ConnectionPoolDataSource. The app server or middleware vendor should

View file

@ -5,12 +5,6 @@
package org.postgresql.ds;
import org.postgresql.PGConnection;
import org.postgresql.PGStatement;
import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
@ -22,11 +16,15 @@ import java.sql.SQLException;
import java.sql.Statement;
import java.util.LinkedList;
import java.util.List;
import javax.sql.ConnectionEvent;
import javax.sql.ConnectionEventListener;
import javax.sql.PooledConnection;
import javax.sql.StatementEventListener;
import org.postgresql.PGConnection;
import org.postgresql.PGStatement;
import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
/**
* PostgreSQL implementation of the PooledConnection interface. This shouldn't be used directly, as
@ -38,11 +36,27 @@ import javax.sql.StatementEventListener;
*/
@SuppressWarnings("rawtypes")
public class PGPooledConnection implements PooledConnection {
// Classes we consider fatal.
private static final String[] fatalClasses = {
"08", // connection error
"53", // insufficient resources
// nb: not just "57" as that includes query cancel which is nonfatal
"57P01", // admin shutdown
"57P02", // crash shutdown
"57P03", // cannot connect now
"58", // system error (backend)
"60", // system error (driver)
"99", // unexpected error
"F0", // configuration file error (backend)
"XX", // internal error (backend)
};
private final List<ConnectionEventListener> listeners = new LinkedList<>();
private Connection con;
private ConnectionHandler last;
private final boolean autoCommit;
private final boolean isXA;
private Connection con;
private ConnectionHandler last;
/**
* Creates a new PooledConnection representing the specified physical connection.
@ -61,6 +75,25 @@ public class PGPooledConnection implements PooledConnection {
this(con, autoCommit, false);
}
private static boolean isFatalState(String state) {
if (state == null) {
// no info, assume fatal
return true;
}
if (state.length() < 2) {
// no class info, assume fatal
return true;
}
for (String fatalClass : fatalClasses) {
if (state.startsWith(fatalClass)) {
return true; // fatal
}
}
return false;
}
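To make the classification concrete, a self-contained sketch with a few standard PostgreSQL SQLSTATE values (the prefixes are copied from fatalClasses above; the sample codes and their meanings come from the PostgreSQL documentation, not from this change):

import java.util.Arrays;
import java.util.List;

final class FatalStateSketch {

    // Same prefixes as fatalClasses above.
    private static final List<String> FATAL_PREFIXES = Arrays.asList(
            "08", "53", "57P01", "57P02", "57P03", "58", "60", "99", "F0", "XX");

    static boolean isFatal(String state) {
        if (state == null || state.length() < 2) {
            return true; // no class information, assume fatal
        }
        return FATAL_PREFIXES.stream().anyMatch(state::startsWith);
    }

    public static void main(String[] args) {
        System.out.println(isFatal("08006")); // true  - connection_failure
        System.out.println(isFatal("57P01")); // true  - admin_shutdown
        System.out.println(isFatal("57014")); // false - query_canceled, class 57 alone is not fatal
        System.out.println(isFatal("23505")); // false - unique_violation, an ordinary SQL error
    }
}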
/**
* Adds a listener for close or fatal error events on the connection handed out to a client.
*/
@ -203,42 +236,6 @@ public class PGPooledConnection implements PooledConnection {
return e == null ? new ConnectionEvent(this) : new ConnectionEvent(this, e);
}
// Classes we consider fatal.
private static final String[] fatalClasses = {
"08", // connection error
"53", // insufficient resources
// nb: not just "57" as that includes query cancel which is nonfatal
"57P01", // admin shutdown
"57P02", // crash shutdown
"57P03", // cannot connect now
"58", // system error (backend)
"60", // system error (driver)
"99", // unexpected error
"F0", // configuration file error (backend)
"XX", // internal error (backend)
};
private static boolean isFatalState(String state) {
if (state == null) {
// no info, assume fatal
return true;
}
if (state.length() < 2) {
// no class info, assume fatal
return true;
}
for (String fatalClass : fatalClasses) {
if (state.startsWith(fatalClass)) {
return true; // fatal
}
}
return false;
}
/**
* Fires a connection error event, but only if we think the exception is fatal.
*
@ -252,6 +249,14 @@ public class PGPooledConnection implements PooledConnection {
fireConnectionFatalError(e);
}
@Override
public void removeStatementEventListener(StatementEventListener listener) {
}
@Override
public void addStatementEventListener(StatementEventListener listener) {
}
/**
* Instead of declaring a class implementing Connection, which would have to be updated for every
* JDK rev, use a dynamic proxy to handle all calls through the Connection interface. This is the
@ -452,12 +457,4 @@ public class PGPooledConnection implements PooledConnection {
}
}
@Override
public void removeStatementEventListener(StatementEventListener listener) {
}
@Override
public void addStatementEventListener(StatementEventListener listener) {
}
}

View file

@ -5,13 +5,6 @@
package org.postgresql.ds;
import org.postgresql.ds.common.BaseDataSource;
import org.postgresql.jdbc.ResourceLock;
import org.postgresql.util.DriverInfo;
import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.Stack;
@ -19,7 +12,6 @@ import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import javax.naming.NamingException;
import javax.naming.Reference;
import javax.naming.StringRefAddr;
@ -27,6 +19,12 @@ import javax.sql.ConnectionEvent;
import javax.sql.ConnectionEventListener;
import javax.sql.DataSource;
import javax.sql.PooledConnection;
import org.postgresql.ds.common.BaseDataSource;
import org.postgresql.jdbc.ResourceLock;
import org.postgresql.util.DriverInfo;
import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
/**
* DataSource which uses connection pooling. <span style="color: red;">Don't use this if your
@ -57,7 +55,6 @@ import javax.sql.PooledConnection;
* </p>
*
* @author Aaron Mulder (ammulder@chariotsolutions.com)
*
* @deprecated Since 42.0.0, instead of this class you should use a fully featured connection pool
* like HikariCP, vibur-dbcp, commons-dbcp, c3p0, etc.
*/
@ -66,24 +63,63 @@ import javax.sql.PooledConnection;
public class PGPoolingDataSource extends BaseDataSource implements DataSource {
protected static ConcurrentMap<String, PGPoolingDataSource> dataSources =
new ConcurrentHashMap<>();
public static PGPoolingDataSource getDataSource(String name) {
return dataSources.get(name);
}
private final Stack<PooledConnection> available = new Stack<>();
private final Stack<PooledConnection> used = new Stack<>();
private final ResourceLock lock = new ResourceLock();
private final Condition lockCondition = lock.newCondition();
// Additional Data Source properties
protected String dataSourceName; // Must be protected for subclasses to sync updates to it
private int initialConnections;
private int maxConnections;
// State variables
private boolean initialized;
private final Stack<PooledConnection> available = new Stack<>();
private final Stack<PooledConnection> used = new Stack<>();
private boolean isClosed;
private final ResourceLock lock = new ResourceLock();
private final Condition lockCondition = lock.newCondition();
/**
* Notified when a pooled connection is closed, or a fatal error occurs on a pooled connection.
* This is the only way connections are marked as unused.
*/
private final ConnectionEventListener connectionEventListener = new ConnectionEventListener() {
@Override
public void connectionClosed(ConnectionEvent event) {
((PooledConnection) event.getSource()).removeConnectionEventListener(this);
try (ResourceLock ignore = lock.obtain()) {
if (isClosed) {
return; // DataSource has been closed
}
boolean removed = used.remove(event.getSource());
if (removed) {
available.push((PooledConnection) event.getSource());
// There's now a new connection available
lockCondition.signal();
} else {
// a connection error occurred
}
}
}
/**
* This is only called for fatal errors, where the physical connection is useless afterward and
* should be removed from the pool.
*/
@Override
public void connectionErrorOccurred(ConnectionEvent event) {
((PooledConnection) event.getSource()).removeConnectionEventListener(this);
try (ResourceLock ignore = lock.obtain()) {
if (isClosed) {
return; // DataSource has been closed
}
used.remove(event.getSource());
// We're now at least 1 connection under the max
lockCondition.signal();
}
}
};
private PGConnectionPoolDataSource source;
public static PGPoolingDataSource getDataSource(String name) {
return dataSources.get(name);
}
/**
* Gets a description of this DataSource.
*/
@ -410,47 +446,6 @@ public class PGPoolingDataSource extends BaseDataSource implements DataSource {
return pc.getConnection();
}
/**
* Notified when a pooled connection is closed, or a fatal error occurs on a pooled connection.
* This is the only way connections are marked as unused.
*/
private final ConnectionEventListener connectionEventListener = new ConnectionEventListener() {
@Override
public void connectionClosed(ConnectionEvent event) {
((PooledConnection) event.getSource()).removeConnectionEventListener(this);
try (ResourceLock ignore = lock.obtain()) {
if (isClosed) {
return; // DataSource has been closed
}
boolean removed = used.remove(event.getSource());
if (removed) {
available.push((PooledConnection) event.getSource());
// There's now a new connection available
lockCondition.signal();
} else {
// a connection error occurred
}
}
}
/**
* This is only called for fatal errors, where the physical connection is useless afterward and
* should be removed from the pool.
*/
@Override
public void connectionErrorOccurred(ConnectionEvent event) {
((PooledConnection) event.getSource()).removeConnectionEventListener(this);
try (ResourceLock ignore = lock.obtain()) {
if (isClosed) {
return; // DataSource has been closed
}
used.remove(event.getSource());
// We're now at least 1 connection under the max
lockCondition.signal();
}
}
};
/**
* Adds custom properties for this DataSource to the properties defined in the superclass.
*/
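Since PGPoolingDataSource is deprecated in favour of full-featured pools, here is a minimal sketch of the recommended replacement using HikariCP; the JDBC URL and credentials are placeholders and the HikariCP dependency is assumed to be on the classpath.

import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.Statement;

public class HikariSketch {
    public static void main(String[] args) throws Exception {
        HikariConfig config = new HikariConfig();
        config.setJdbcUrl("jdbc:postgresql://localhost:5432/test"); // placeholder URL
        config.setUsername("test");                                 // placeholder credentials
        config.setPassword("test");
        config.setMaximumPoolSize(10);

        try (HikariDataSource ds = new HikariDataSource(config);
             Connection c = ds.getConnection();
             Statement st = c.createStatement();
             ResultSet rs = st.executeQuery("SELECT 1")) {
            rs.next(); // connection is borrowed from and returned to the pool automatically
        }
    }
}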

View file

@ -5,16 +5,14 @@
package org.postgresql.ds;
import org.postgresql.ds.common.BaseDataSource;
import org.postgresql.util.DriverInfo;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.sql.SQLException;
import javax.sql.DataSource;
import org.postgresql.ds.common.BaseDataSource;
import org.postgresql.util.DriverInfo;
/**
* Simple DataSource which does not perform connection pooling. In order to use the DataSource, you

View file

@ -5,16 +5,6 @@
package org.postgresql.ds.common;
import org.postgresql.Driver;
import org.postgresql.PGProperty;
import org.postgresql.jdbc.AutoSave;
import org.postgresql.jdbc.PreferQueryMode;
import org.postgresql.util.ExpressionProperties;
import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
import org.postgresql.util.URLCoder;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
@ -28,13 +18,21 @@ import java.util.Arrays;
import java.util.Properties;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.naming.NamingException;
import javax.naming.RefAddr;
import javax.naming.Reference;
import javax.naming.Referenceable;
import javax.naming.StringRefAddr;
import javax.sql.CommonDataSource;
import org.postgresql.Driver;
import org.postgresql.PGProperty;
import org.postgresql.jdbc.AutoSave;
import org.postgresql.jdbc.PreferQueryMode;
import org.postgresql.util.ExpressionProperties;
import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
import org.postgresql.util.URLCoder;
/**
* Base class for data sources and related classes.
@ -45,16 +43,6 @@ import javax.sql.CommonDataSource;
public abstract class BaseDataSource implements CommonDataSource, Referenceable {
private static final Logger LOGGER = Logger.getLogger(BaseDataSource.class.getName());
// Standard properties, defined in the JDBC 2.0 Optional Package spec
private String[] serverNames = new String[]{"localhost"};
private String databaseName = "";
private String user;
private String password;
private int[] portNumbers = new int[]{0};
// Map for all other properties
private Properties properties = new Properties();
/*
* Ensure the driver is loaded as JDBC Driver might be invisible to Java's ServiceLoader.
* Usually, {@code Class.forName(...)} is not required as {@link DriverManager} detects JDBC drivers
@ -72,6 +60,23 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
}
}
// Standard properties, defined in the JDBC 2.0 Optional Package spec
private String[] serverNames = new String[]{"localhost"};
private String databaseName = "";
private String user;
private String password;
private int[] portNumbers = new int[]{0};
// Map for all other properties
private Properties properties = new Properties();
private static String getReferenceProperty(Reference ref, String propertyName) {
RefAddr addr = ref.get(propertyName);
if (addr == null) {
return null;
}
return (String) addr.getContent();
}
/**
* Gets a connection to the PostgreSQL database. The database is identified by the DataSource
* properties serverName, databaseName, and portNumber. The user to connect as is identified by
@ -139,15 +144,6 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
return serverNames[0];
}
/**
* Gets the name of the host(s) the PostgreSQL database is running on.
*
* @return name of the host(s) the PostgreSQL database is running on
*/
public String[] getServerNames() {
return serverNames;
}
/**
* Sets the name of the host the PostgreSQL database is running on. If this is changed, it will
* only affect future calls to getConnection. The default value is {@code localhost}.
@ -160,6 +156,15 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
this.setServerNames(new String[]{serverName});
}
/**
* Gets the name of the host(s) the PostgreSQL database is running on.
*
* @return name of the host(s) the PostgreSQL database is running on
*/
public String[] getServerNames() {
return serverNames;
}
/**
* Sets the name of the host(s) the PostgreSQL database is running on. If this is changed, it will
* only affect future calls to getConnection. The default value is {@code localhost}.
@ -167,7 +172,7 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
* @param serverNames name of the host(s) the PostgreSQL database is running on
*/
@SuppressWarnings("nullness")
public void setServerNames(String [] serverNames) {
public void setServerNames(String[] serverNames) {
if (serverNames == null || serverNames.length == 0) {
this.serverNames = new String[]{"localhost"};
} else {
@ -265,15 +270,6 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
return portNumbers[0];
}
/**
* Gets the port(s) which the PostgreSQL server is listening on for TCP/IP connections.
*
* @return The port(s), or 0 if the default port will be used.
*/
public int[] getPortNumbers() {
return portNumbers;
}
/**
* Sets the port which the PostgreSQL server is listening on for TCP/IP connections. Be sure the
* -i flag is passed to postmaster when PostgreSQL is started. If this is not set, or set to 0,
@ -287,6 +283,15 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
setPortNumbers(new int[]{portNumber});
}
/**
* Gets the port(s) which the PostgreSQL server is listening on for TCP/IP connections.
*
* @return The port(s), or 0 if the default port will be used.
*/
public int[] getPortNumbers() {
return portNumbers;
}
/**
* Sets the port(s) which the PostgreSQL server is listening on for TCP/IP connections. Be sure the
* -i flag is passed to postmaster when PostgreSQL is started. If this is not set, or set to 0,
@ -294,7 +299,7 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
*
* @param portNumbers port(s) which the PostgreSQL server is listening on for TCP/IP
*/
public void setPortNumbers(int [] portNumbers) {
public void setPortNumbers(int[] portNumbers) {
if (portNumbers == null || portNumbers.length == 0) {
portNumbers = new int[]{0};
}
@ -352,7 +357,6 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
}
/**
*
* @return GSS ResponseTimeout
* @see PGProperty#GSS_RESPONSE_TIMEOUT
*/
@ -361,7 +365,6 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
}
/**
*
* @param gssResponseTimeout gss response timeout
* @see PGProperty#GSS_RESPONSE_TIMEOUT
*/
@ -370,7 +373,6 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
}
/**
*
* @return SSL ResponseTimeout
* @see PGProperty#SSL_RESPONSE_TIMEOUT
*/
@ -379,7 +381,6 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
}
/**
*
* @param sslResponseTimeout ssl response timeout
* @see PGProperty#SSL_RESPONSE_TIMEOUT
*/
@ -459,14 +460,6 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
PGProperty.SEND_BUFFER_SIZE.set(properties, nbytes);
}
/**
* @param count prepare threshold
* @see PGProperty#PREPARE_THRESHOLD
*/
public void setPrepareThreshold(int count) {
PGProperty.PREPARE_THRESHOLD.set(properties, count);
}
/**
* @return prepare threshold
* @see PGProperty#PREPARE_THRESHOLD
@ -475,6 +468,14 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
return PGProperty.PREPARE_THRESHOLD.getIntNoCheck(properties);
}
/**
* @param count prepare threshold
* @see PGProperty#PREPARE_THRESHOLD
*/
public void setPrepareThreshold(int count) {
PGProperty.PREPARE_THRESHOLD.set(properties, count);
}
/**
* @return prepared statement cache size (number of statements per connection)
* @see PGProperty#PREPARED_STATEMENT_CACHE_QUERIES
@ -539,14 +540,6 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
PGProperty.DATABASE_METADATA_CACHE_FIELDS_MIB.set(properties, cacheSize);
}
/**
* @param fetchSize default fetch size
* @see PGProperty#DEFAULT_ROW_FETCH_SIZE
*/
public void setDefaultRowFetchSize(int fetchSize) {
PGProperty.DEFAULT_ROW_FETCH_SIZE.set(properties, fetchSize);
}
/**
* @return default fetch size
* @see PGProperty#DEFAULT_ROW_FETCH_SIZE
@ -556,11 +549,11 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
}
/**
* @param unknownLength unknown length
* @see PGProperty#UNKNOWN_LENGTH
* @param fetchSize default fetch size
* @see PGProperty#DEFAULT_ROW_FETCH_SIZE
*/
public void setUnknownLength(int unknownLength) {
PGProperty.UNKNOWN_LENGTH.set(properties, unknownLength);
public void setDefaultRowFetchSize(int fetchSize) {
PGProperty.DEFAULT_ROW_FETCH_SIZE.set(properties, fetchSize);
}
/**
@ -572,11 +565,11 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
}
/**
* @param seconds socket timeout
* @see PGProperty#SOCKET_TIMEOUT
* @param unknownLength unknown length
* @see PGProperty#UNKNOWN_LENGTH
*/
public void setSocketTimeout(int seconds) {
PGProperty.SOCKET_TIMEOUT.set(properties, seconds);
public void setUnknownLength(int unknownLength) {
PGProperty.UNKNOWN_LENGTH.set(properties, unknownLength);
}
/**
@ -588,11 +581,11 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
}
/**
* @param seconds timeout that is used for sending cancel command
* @see PGProperty#CANCEL_SIGNAL_TIMEOUT
* @param seconds socket timeout
* @see PGProperty#SOCKET_TIMEOUT
*/
public void setCancelSignalTimeout(int seconds) {
PGProperty.CANCEL_SIGNAL_TIMEOUT.set(properties, seconds);
public void setSocketTimeout(int seconds) {
PGProperty.SOCKET_TIMEOUT.set(properties, seconds);
}
/**
@ -604,15 +597,11 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
}
/**
* @param enabled if SSL is enabled
* @see PGProperty#SSL
* @param seconds timeout that is used for sending cancel command
* @see PGProperty#CANCEL_SIGNAL_TIMEOUT
*/
public void setSsl(boolean enabled) {
if (enabled) {
PGProperty.SSL.set(properties, true);
} else {
PGProperty.SSL.set(properties, false);
}
public void setCancelSignalTimeout(int seconds) {
PGProperty.CANCEL_SIGNAL_TIMEOUT.set(properties, seconds);
}
/**
@ -624,14 +613,6 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
return PGProperty.SSL.getBoolean(properties) || "".equals(PGProperty.SSL.getOrDefault(properties));
}
/**
* @param classname SSL factory class name
* @see PGProperty#SSL_FACTORY
*/
public void setSslfactory(String classname) {
PGProperty.SSL_FACTORY.set(properties, classname);
}
/**
* @return SSL factory class name
* @see PGProperty#SSL_FACTORY
@ -640,6 +621,14 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
return PGProperty.SSL_FACTORY.getOrDefault(properties);
}
/**
* @param classname SSL factory class name
* @see PGProperty#SSL_FACTORY
*/
public void setSslfactory(String classname) {
PGProperty.SSL_FACTORY.set(properties, classname);
}
/**
* @return SSL mode
* @see PGProperty#SSL_MODE
@ -770,14 +759,6 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
PGProperty.SSL_PASSWORD_CALLBACK.set(properties, className);
}
/**
* @param applicationName application name
* @see PGProperty#APPLICATION_NAME
*/
public void setApplicationName(String applicationName) {
PGProperty.APPLICATION_NAME.set(properties, applicationName);
}
/**
* @return application name
* @see PGProperty#APPLICATION_NAME
@ -787,11 +768,11 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
}
/**
* @param targetServerType target server type
* @see PGProperty#TARGET_SERVER_TYPE
* @param applicationName application name
* @see PGProperty#APPLICATION_NAME
*/
public void setTargetServerType(String targetServerType) {
PGProperty.TARGET_SERVER_TYPE.set(properties, targetServerType);
public void setApplicationName(String applicationName) {
PGProperty.APPLICATION_NAME.set(properties, applicationName);
}
/**
@ -803,11 +784,11 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
}
/**
* @param loadBalanceHosts load balance hosts
* @see PGProperty#LOAD_BALANCE_HOSTS
* @param targetServerType target server type
* @see PGProperty#TARGET_SERVER_TYPE
*/
public void setLoadBalanceHosts(boolean loadBalanceHosts) {
PGProperty.LOAD_BALANCE_HOSTS.set(properties, loadBalanceHosts);
public void setTargetServerType(String targetServerType) {
PGProperty.TARGET_SERVER_TYPE.set(properties, targetServerType);
}
/**
@ -818,14 +799,6 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
return PGProperty.LOAD_BALANCE_HOSTS.isPresent(properties);
}
/**
* @param hostRecheckSeconds host recheck seconds
* @see PGProperty#HOST_RECHECK_SECONDS
*/
public void setHostRecheckSeconds(int hostRecheckSeconds) {
PGProperty.HOST_RECHECK_SECONDS.set(properties, hostRecheckSeconds);
}
/**
* @return host recheck seconds
* @see PGProperty#HOST_RECHECK_SECONDS
@ -835,11 +808,11 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
}
/**
* @param enabled if TCP keep alive should be enabled
* @see PGProperty#TCP_KEEP_ALIVE
* @param hostRecheckSeconds host recheck seconds
* @see PGProperty#HOST_RECHECK_SECONDS
*/
public void setTcpKeepAlive(boolean enabled) {
PGProperty.TCP_KEEP_ALIVE.set(properties, enabled);
public void setHostRecheckSeconds(int hostRecheckSeconds) {
PGProperty.HOST_RECHECK_SECONDS.set(properties, hostRecheckSeconds);
}
/**
@ -850,14 +823,6 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
return PGProperty.TCP_KEEP_ALIVE.getBoolean(properties);
}
/**
* @param enabled if TCP no delay should be enabled
* @see PGProperty#TCP_NO_DELAY
*/
public void setTcpNoDelay(boolean enabled) {
PGProperty.TCP_NO_DELAY.set(properties, enabled);
}
/**
* @return true if TCP no delay is enabled
* @see PGProperty#TCP_NO_DELAY
@ -867,11 +832,11 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
}
/**
* @param enabled if binary transfer should be enabled
* @see PGProperty#BINARY_TRANSFER
* @param enabled if TCP no delay should be enabled
* @see PGProperty#TCP_NO_DELAY
*/
public void setBinaryTransfer(boolean enabled) {
PGProperty.BINARY_TRANSFER.set(properties, enabled);
public void setTcpNoDelay(boolean enabled) {
PGProperty.TCP_NO_DELAY.set(properties, enabled);
}
/**
@ -883,11 +848,11 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
}
/**
* @param oidList list of OIDs that are allowed to use binary transfer
* @see PGProperty#BINARY_TRANSFER_ENABLE
* @param enabled if binary transfer should be enabled
* @see PGProperty#BINARY_TRANSFER
*/
public void setBinaryTransferEnable(String oidList) {
PGProperty.BINARY_TRANSFER_ENABLE.set(properties, oidList);
public void setBinaryTransfer(boolean enabled) {
PGProperty.BINARY_TRANSFER.set(properties, enabled);
}
/**
@ -899,11 +864,11 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
}
/**
* @param oidList list of OIDs that are not allowed to use binary transfer
* @see PGProperty#BINARY_TRANSFER_DISABLE
* @param oidList list of OIDs that are allowed to use binary transfer
* @see PGProperty#BINARY_TRANSFER_ENABLE
*/
public void setBinaryTransferDisable(String oidList) {
PGProperty.BINARY_TRANSFER_DISABLE.set(properties, oidList);
public void setBinaryTransferEnable(String oidList) {
PGProperty.BINARY_TRANSFER_ENABLE.set(properties, oidList);
}
/**
@ -914,6 +879,14 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
return PGProperty.BINARY_TRANSFER_DISABLE.getOrDefault(properties);
}
/**
* @param oidList list of OIDs that are not allowed to use binary transfer
* @see PGProperty#BINARY_TRANSFER_DISABLE
*/
public void setBinaryTransferDisable(String oidList) {
PGProperty.BINARY_TRANSFER_DISABLE.set(properties, oidList);
}
/**
* @return string type
* @see PGProperty#STRING_TYPE
@ -946,14 +919,6 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
return PGProperty.DISABLE_COLUMN_SANITISER.getBoolean(properties);
}
/**
* @param disableColumnSanitiser if column sanitizer should be disabled
* @see PGProperty#DISABLE_COLUMN_SANITISER
*/
public void setDisableColumnSanitiser(boolean disableColumnSanitiser) {
PGProperty.DISABLE_COLUMN_SANITISER.set(properties, disableColumnSanitiser);
}
/**
* @return current schema
* @see PGProperty#CURRENT_SCHEMA
@ -978,14 +943,6 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
return PGProperty.READ_ONLY.getBoolean(properties);
}
/**
* @param readOnly if connection should be readonly
* @see PGProperty#READ_ONLY
*/
public void setReadOnly(boolean readOnly) {
PGProperty.READ_ONLY.set(properties, readOnly);
}
/**
* @return The behavior when set read only
* @see PGProperty#READ_ONLY_MODE
@ -1010,14 +967,6 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
return PGProperty.LOG_UNCLOSED_CONNECTIONS.getBoolean(properties);
}
/**
* @param enabled true if driver should log unclosed connections
* @see PGProperty#LOG_UNCLOSED_CONNECTIONS
*/
public void setLogUnclosedConnections(boolean enabled) {
PGProperty.LOG_UNCLOSED_CONNECTIONS.set(properties, enabled);
}
/**
* @return true if driver should log include detail in server error messages
* @see PGProperty#LOG_SERVER_ERROR_DETAIL
@ -1054,6 +1003,7 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
* This is important in pool-by-transaction scenarios in order to make sure that all the statements
* reach the same connection that is being initialized. If set, then we will group the startup
* parameters in a transaction.
*
* @return whether to group startup parameters or not
* @see PGProperty#GROUP_STARTUP_PARAMETERS
*/
@ -1062,7 +1012,6 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
}
/**
*
* @param groupStartupParameters whether to group startup Parameters in a transaction or not
* @see PGProperty#GROUP_STARTUP_PARAMETERS
*/
@ -1151,7 +1100,6 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
}
/**
*
* @return GSS encryption mode: disable, prefer or require
*/
public String getGssEncMode() {
@ -1159,7 +1107,6 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
}
/**
*
* @param mode encryption mode: disable, prefer or require
*/
public void setGssEncMode(String mode) {
@ -1190,14 +1137,6 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
return PGProperty.ALLOW_ENCODING_CHANGES.getBoolean(properties);
}
/**
* @param allow if connection allows encoding changes
* @see PGProperty#ALLOW_ENCODING_CHANGES
*/
public void setAllowEncodingChanges(boolean allow) {
PGProperty.ALLOW_ENCODING_CHANGES.set(properties, allow);
}
/**
* @return socket factory class name
* @see PGProperty#SOCKET_FACTORY
@ -1232,14 +1171,6 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
PGProperty.SOCKET_FACTORY_ARG.set(properties, socketFactoryArg);
}
/**
* @param replication set to 'database' for logical replication or 'true' for physical replication
* @see PGProperty#REPLICATION
*/
public void setReplication(String replication) {
PGProperty.REPLICATION.set(properties, replication);
}
/**
* @return 'select', "callIfNoReturn', or 'call'
* @see PGProperty#ESCAPE_SYNTAX_CALL_MODE
@ -1264,6 +1195,14 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
return PGProperty.REPLICATION.getOrDefault(properties);
}
/**
* @param replication set to 'database' for logical replication or 'true' for physical replication
* @see PGProperty#REPLICATION
*/
public void setReplication(String replication) {
PGProperty.REPLICATION.set(properties, replication);
}
/**
* @return the localSocketAddress
* @see PGProperty#LOCAL_SOCKET_ADDRESS
@ -1282,6 +1221,7 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
/**
* This property is no longer used by the driver and will be ignored.
*
* @return loggerLevel in properties
* @deprecated Configure via java.util.logging
*/
@ -1292,6 +1232,7 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
/**
* This property is no longer used by the driver and will be ignored.
*
* @param loggerLevel loggerLevel to set, will be ignored
* @deprecated Configure via java.util.logging
*/
@ -1302,6 +1243,7 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
/**
* This property is no longer used by the driver and will be ignored.
*
* @return loggerFile in properties
* @deprecated Configure via java.util.logging
*/
@ -1313,6 +1255,7 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
/**
* This property is no longer used by the driver and will be ignored.
*
* @param loggerFile will be ignored
* @deprecated Configure via java.util.logging
*/
@ -1372,15 +1315,6 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
return url.toString();
}
/**
* Generates a {@link DriverManager} URL from the other properties supplied.
*
* @return {@link DriverManager} URL from the other properties supplied
*/
public String getURL() {
return getUrl();
}
/**
* Sets properties from a {@link DriverManager} URL.
*
@ -1400,6 +1334,15 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
}
}
/**
* Generates a {@link DriverManager} URL from the other properties supplied.
*
* @return {@link DriverManager} URL from the other properties supplied
*/
public String getURL() {
return getUrl();
}
/**
* Sets properties from a {@link DriverManager} URL.
* Added to follow convention used in other DBMS.
@ -1411,7 +1354,6 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
}
/**
*
* @return the class name to use for the Authentication Plugin.
* This can be null in which case the default password authentication plugin will be used
*/
@ -1420,11 +1362,9 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
}
/**
*
* @param className name of a class which implements {@link org.postgresql.plugin.AuthenticationPlugin}
* This class will be used to get the encoded bytes to be sent to the server as the
* password to authenticate the user.
*
*/
public void setAuthenticationPluginClassName(String className) {
PGProperty.AUTHENTICATION_PLUGIN_CLASS_NAME.set(properties, className);
@ -1564,14 +1504,6 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
}
}
private static String getReferenceProperty(Reference ref, String propertyName) {
RefAddr addr = ref.get(propertyName);
if (addr == null) {
return null;
}
return (String) addr.getContent();
}
protected void writeBaseObject(ObjectOutputStream out) throws IOException {
out.writeObject(serverNames);
out.writeObject(databaseName);
@ -1660,14 +1592,6 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
return PGProperty.REWRITE_BATCHED_INSERTS.getBoolean(properties);
}
/**
* @param reWrite boolean value to set the property in the properties collection
* @see PGProperty#REWRITE_BATCHED_INSERTS
*/
public void setReWriteBatchedInserts(boolean reWrite) {
PGProperty.REWRITE_BATCHED_INSERTS.set(properties, reWrite);
}
/**
* @return boolean indicating property is enabled or not.
* @see PGProperty#HIDE_UNPRIVILEGED_OBJECTS
@ -1729,15 +1653,22 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
PGProperty.XML_FACTORY_FACTORY.set(properties, xmlFactoryFactory);
}
/*
* Alias methods below; these help with ease of use with other database tools / frameworks
* which expect normal Java bean getters / setters to exist for the property names.
*/
public boolean isSsl() {
return getSsl();
}
/**
* @param enabled if SSL is enabled
* @see PGProperty#SSL
*/
public void setSsl(boolean enabled) {
if (enabled) {
PGProperty.SSL.set(properties, true);
} else {
PGProperty.SSL.set(properties, false);
}
}
public String getSslfactoryarg() {
return getSslFactoryArg();
}
@ -1762,6 +1693,11 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
setSslMode(mode);
}
/*
* Alias methods below; these help with ease of use with other database tools / frameworks
* which expect normal Java bean getters / setters to exist for the property names.
*/
public String getSslhostnameverifier() {
return getSslHostnameVerifier();
}
@ -1814,26 +1750,74 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
return getAllowEncodingChanges();
}
/**
* @param allow if connection allows encoding changes
* @see PGProperty#ALLOW_ENCODING_CHANGES
*/
public void setAllowEncodingChanges(boolean allow) {
PGProperty.ALLOW_ENCODING_CHANGES.set(properties, allow);
}
public boolean isLogUnclosedConnections() {
return getLogUnclosedConnections();
}
/**
* @param enabled true if driver should log unclosed connections
* @see PGProperty#LOG_UNCLOSED_CONNECTIONS
*/
public void setLogUnclosedConnections(boolean enabled) {
PGProperty.LOG_UNCLOSED_CONNECTIONS.set(properties, enabled);
}
public boolean isTcpKeepAlive() {
return getTcpKeepAlive();
}
/**
* @param enabled if TCP keep alive should be enabled
* @see PGProperty#TCP_KEEP_ALIVE
*/
public void setTcpKeepAlive(boolean enabled) {
PGProperty.TCP_KEEP_ALIVE.set(properties, enabled);
}
public boolean isReadOnly() {
return getReadOnly();
}
/**
* @param readOnly if connection should be readonly
* @see PGProperty#READ_ONLY
*/
public void setReadOnly(boolean readOnly) {
PGProperty.READ_ONLY.set(properties, readOnly);
}
public boolean isDisableColumnSanitiser() {
return getDisableColumnSanitiser();
}
/**
* @param disableColumnSanitiser if column sanitizer should be disabled
* @see PGProperty#DISABLE_COLUMN_SANITISER
*/
public void setDisableColumnSanitiser(boolean disableColumnSanitiser) {
PGProperty.DISABLE_COLUMN_SANITISER.set(properties, disableColumnSanitiser);
}
public boolean isLoadBalanceHosts() {
return getLoadBalanceHosts();
}
/**
* @param loadBalanceHosts load balance hosts
* @see PGProperty#LOAD_BALANCE_HOSTS
*/
public void setLoadBalanceHosts(boolean loadBalanceHosts) {
PGProperty.LOAD_BALANCE_HOSTS.set(properties, loadBalanceHosts);
}
public boolean isCleanupSavePoints() {
return getCleanupSavepoints();
}
@ -1845,4 +1829,12 @@ public abstract class BaseDataSource implements CommonDataSource, Referenceable
public boolean isReWriteBatchedInserts() {
return getReWriteBatchedInserts();
}
/**
* @param reWrite boolean value to set the property in the properties collection
* @see PGProperty#REWRITE_BATCHED_INSERTS
*/
public void setReWriteBatchedInserts(boolean reWrite) {
PGProperty.REWRITE_BATCHED_INSERTS.set(properties, reWrite);
}
}
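To show how the bean-style getters and setters above are typically used, here is a minimal sketch configuring a PGSimpleDataSource, which inherits these properties from BaseDataSource; hosts, ports, database and credentials are placeholders.

import java.sql.Connection;
import org.postgresql.ds.PGSimpleDataSource;

public class DataSourceConfigSketch {
    public static void main(String[] args) throws Exception {
        PGSimpleDataSource ds = new PGSimpleDataSource();
        ds.setServerNames(new String[]{"localhost", "replica.example"}); // placeholder hosts
        ds.setPortNumbers(new int[]{5432, 5432});
        ds.setDatabaseName("test");
        ds.setUser("test");
        ds.setPassword("test");
        ds.setSsl(false);
        ds.setTargetServerType("any");
        ds.setPrepareThreshold(5);

        try (Connection c = ds.getConnection()) {
            // the generated URL reflects the properties set above
            System.out.println(ds.getUrl());
        }
    }
}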

View file

@ -5,17 +5,15 @@
package org.postgresql.ds.common;
import org.postgresql.ds.PGConnectionPoolDataSource;
import org.postgresql.ds.PGPoolingDataSource;
import org.postgresql.ds.PGSimpleDataSource;
import java.util.Hashtable;
import javax.naming.Context;
import javax.naming.Name;
import javax.naming.RefAddr;
import javax.naming.Reference;
import javax.naming.spi.ObjectFactory;
import org.postgresql.ds.PGConnectionPoolDataSource;
import org.postgresql.ds.PGPoolingDataSource;
import org.postgresql.ds.PGSimpleDataSource;
/**
* Returns a DataSource-ish thing based on a JNDI reference. In the case of a SimpleDataSource or

View file

@ -54,6 +54,21 @@ public class Fastpath {
this.executor = conn.getQueryExecutor();
}
/**
* Creates a FastpathArg with an oid parameter. This is here instead of a constructor of
* FastpathArg because the constructor can't tell the difference between a long that is really an
* int8 and a long that is an oid.
*
* @param oid input oid
* @return FastpathArg with an oid parameter
*/
public static FastpathArg createOIDArg(long oid) {
if (oid > Integer.MAX_VALUE) {
oid -= NUM_OIDS;
}
return new FastpathArg((int) oid);
}
/**
* Send a function call to the PostgreSQL backend.
*
@ -95,7 +110,7 @@ public class Fastpath {
* @return null if no data, byte[] otherwise
* @throws SQLException if a database-access error occurs.
*/
public byte [] fastpath(int fnId, FastpathArg[] args) throws SQLException {
public byte[] fastpath(int fnId, FastpathArg[] args) throws SQLException {
// Turn fastpath array into a parameter list.
ParameterList params = executor.createFastpathParameters(args.length);
for (int i = 0; i < args.length; i++) {
@ -143,7 +158,7 @@ public class Fastpath {
* @throws SQLException if name is unknown or if a database-access error occurs.
* @see org.postgresql.largeobject.LargeObject
*/
public byte [] fastpath(String name, FastpathArg[] args) throws SQLException {
public byte[] fastpath(String name, FastpathArg[] args) throws SQLException {
connection.getLogger().log(Level.FINEST, "Fastpath: calling {0}", name);
return fastpath(getID(name), args);
}
@ -223,7 +238,7 @@ public class Fastpath {
* @return byte[] array containing result
* @throws SQLException if a database-access error occurs or no result
*/
public byte [] getData(String name, FastpathArg[] args) throws SQLException {
public byte[] getData(String name, FastpathArg[] args) throws SQLException {
return fastpath(name, args);
}
@ -303,19 +318,4 @@ public class Fastpath {
return id;
}
/**
* Creates a FastpathArg with an oid parameter. This is here instead of a constructor of
* FastpathArg because the constructor can't tell the difference between a long that is really an
* int8 and a long that is an oid.
*
* @param oid input oid
* @return FastpathArg with an oid parameter
*/
public static FastpathArg createOIDArg(long oid) {
if (oid > Integer.MAX_VALUE) {
oid -= NUM_OIDS;
}
return new FastpathArg((int) oid);
}
}
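As a usage illustration for the Fastpath API shown above, the sketch below calls large-object server functions through the fastpath interface. The connection settings, the pg_proc lookup and the INV_READ/INV_WRITE constant values are assumptions for the sake of the example, not part of this change.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
import org.postgresql.PGConnection;
import org.postgresql.fastpath.Fastpath;
import org.postgresql.fastpath.FastpathArg;

public class FastpathSketch {
    public static void main(String[] args) throws Exception {
        // placeholder connection settings
        try (Connection c = DriverManager.getConnection(
                "jdbc:postgresql://localhost:5432/test", "test", "test")) {
            c.setAutoCommit(false); // large-object calls must run inside a transaction
            Fastpath fp = c.unwrap(PGConnection.class).getFastpathAPI();
            // register name -> oid mappings so fastpath(String, ...) can resolve the functions
            try (Statement st = c.createStatement();
                 ResultSet rs = st.executeQuery(
                     "SELECT proname, oid FROM pg_catalog.pg_proc"
                         + " WHERE proname IN ('lo_creat', 'lo_unlink')")) {
                fp.addFunctions(rs);
            }
            int mode = 0x00020000 | 0x00040000; // INV_WRITE | INV_READ (assumed constant values)
            long oid = fp.getOID("lo_creat", new FastpathArg[]{new FastpathArg(mode)});
            // createOIDArg() wraps an oid so values above Integer.MAX_VALUE are sent correctly
            fp.fastpath("lo_unlink", new FastpathArg[]{Fastpath.createOIDArg(oid)});
            c.commit();
        }
    }
}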

View file

@ -29,24 +29,10 @@ public class FastpathArg {
/**
* Encoded byte value of argument.
*/
private final byte [] bytes;
private final byte[] bytes;
private final int bytesStart;
private final int bytesLength;
static class ByteStreamWriterFastpathArg extends FastpathArg {
private final ByteStreamWriter writer;
ByteStreamWriterFastpathArg(ByteStreamWriter writer) {
super(null, 0, 0);
this.writer = writer;
}
@Override
void populateParameter(ParameterList params, int index) throws SQLException {
params.setBytea(index, writer);
}
}
/**
* Constructs an argument that consists of an integer value.
*
@ -97,7 +83,7 @@ public class FastpathArg {
* @param off offset within array
* @param len length of data to include
*/
public FastpathArg(byte [] buf, int off, int len) {
public FastpathArg(byte[] buf, int off, int len) {
this.bytes = buf;
this.bytesStart = off;
this.bytesLength = len;
@ -123,4 +109,18 @@ public class FastpathArg {
params.setBytea(index, bytes, bytesStart, bytesLength);
}
}
static class ByteStreamWriterFastpathArg extends FastpathArg {
private final ByteStreamWriter writer;
ByteStreamWriterFastpathArg(ByteStreamWriter writer) {
super(null, 0, 0);
this.writer = writer;
}
@Override
void populateParameter(ParameterList params, int index) throws SQLException {
params.setBytea(index, writer);
}
}
}

View file

@ -23,7 +23,7 @@ public class PGbox extends PGobject implements PGBinaryObject, Serializable, Clo
/**
* These are the two points.
*/
public PGpoint [] point;
public PGpoint[] point;
/**
* @param x1 first x coordinate
@ -60,34 +60,6 @@ public class PGbox extends PGobject implements PGBinaryObject, Serializable, Clo
type = "box";
}
/**
* This method sets the value of this object. It should be overridden, but still called by
* subclasses.
*
* @param value a string representation of the value of the object
* @throws SQLException thrown if value is invalid for this type
*/
@Override
public void setValue(String value) throws SQLException {
if (value == null) {
this.point = null;
return;
}
PGtokenizer t = new PGtokenizer(value, ',');
if (t.getSize() != 2) {
throw new PSQLException(
GT.tr("Conversion to type {0} failed: {1}.", type, value),
PSQLState.DATA_TYPE_MISMATCH);
}
PGpoint[] point = this.point;
if (point == null) {
this.point = point = new PGpoint[2];
}
point[0] = new PGpoint(t.getToken(0));
point[1] = new PGpoint(t.getToken(1));
}
/**
* @param b Definition of this point in PostgreSQL's binary syntax
*/
@ -182,6 +154,34 @@ public class PGbox extends PGobject implements PGBinaryObject, Serializable, Clo
return point == null ? null : point[0].toString() + "," + point[1].toString();
}
/**
* This method sets the value of this object. It should be overridden, but still called by
* subclasses.
*
* @param value a string representation of the value of the object
* @throws SQLException thrown if value is invalid for this type
*/
@Override
public void setValue(String value) throws SQLException {
if (value == null) {
this.point = null;
return;
}
PGtokenizer t = new PGtokenizer(value, ',');
if (t.getSize() != 2) {
throw new PSQLException(
GT.tr("Conversion to type {0} failed: {1}.", type, value),
PSQLState.DATA_TYPE_MISMATCH);
}
PGpoint[] point = this.point;
if (point == null) {
this.point = point = new PGpoint[2];
}
point[0] = new PGpoint(t.getToken(0));
point[1] = new PGpoint(t.getToken(1));
}
@Override
public int lengthInBytes() {
PGpoint[] point = this.point;

View file

@ -64,31 +64,6 @@ public class PGcircle extends PGobject implements Serializable, Cloneable {
type = "circle";
}
/**
* @param s definition of the circle in PostgreSQL's syntax.
* @throws SQLException on conversion failure
*/
@Override
public void setValue(String s) throws SQLException {
if (s == null) {
center = null;
return;
}
PGtokenizer t = new PGtokenizer(PGtokenizer.removeAngle(s), ',');
if (t.getSize() != 2) {
throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
PSQLState.DATA_TYPE_MISMATCH);
}
try {
center = new PGpoint(t.getToken(0));
radius = Double.parseDouble(t.getToken(1));
} catch (NumberFormatException e) {
throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
PSQLState.DATA_TYPE_MISMATCH, e);
}
}
/**
* @param obj Object to compare with
* @return true if the two circles are identical
@ -137,4 +112,29 @@ public class PGcircle extends PGobject implements Serializable, Cloneable {
public String getValue() {
return center == null ? null : "<" + center + "," + radius + ">";
}
/**
* @param s definition of the circle in PostgreSQL's syntax.
* @throws SQLException on conversion failure
*/
@Override
public void setValue(String s) throws SQLException {
if (s == null) {
center = null;
return;
}
PGtokenizer t = new PGtokenizer(PGtokenizer.removeAngle(s), ',');
if (t.getSize() != 2) {
throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
PSQLState.DATA_TYPE_MISMATCH);
}
try {
center = new PGpoint(t.getToken(0));
radius = Double.parseDouble(t.getToken(1));
} catch (NumberFormatException e) {
throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
PSQLState.DATA_TYPE_MISMATCH, e);
}
}
}

View file

@ -86,6 +86,22 @@ public class PGline extends PGobject implements Serializable, Cloneable {
setValue(point[0], point[1]);
}
/**
* @param s definition of the line in PostgreSQL's syntax.
* @throws SQLException on conversion failure
*/
public PGline(String s) throws SQLException {
this();
setValue(s);
}
/**
* required by the driver.
*/
public PGline() {
type = "line";
}
private void setValue(PGpoint p1, PGpoint p2) {
if (p1 == null || p2 == null) {
isNull = true;
@ -105,55 +121,6 @@ public class PGline extends PGobject implements Serializable, Cloneable {
c = y1 - a * x1;
}
/**
* @param s definition of the line in PostgreSQL's syntax.
* @throws SQLException on conversion failure
*/
public PGline(String s) throws SQLException {
this();
setValue(s);
}
/**
* required by the driver.
*/
public PGline() {
type = "line";
}
/**
* @param s Definition of the line in PostgreSQL's syntax
* @throws SQLException on conversion failure
*/
@Override
public void setValue(String s) throws SQLException {
isNull = s == null;
if (s == null) {
return;
}
if (s.trim().startsWith("{")) {
PGtokenizer t = new PGtokenizer(PGtokenizer.removeCurlyBrace(s), ',');
if (t.getSize() != 3) {
throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
PSQLState.DATA_TYPE_MISMATCH);
}
a = Double.parseDouble(t.getToken(0));
b = Double.parseDouble(t.getToken(1));
c = Double.parseDouble(t.getToken(2));
} else if (s.trim().startsWith("[")) {
PGtokenizer t = new PGtokenizer(PGtokenizer.removeBox(s), ',');
if (t.getSize() != 2) {
throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
PSQLState.DATA_TYPE_MISMATCH);
}
PGpoint point1 = new PGpoint(t.getToken(0));
PGpoint point2 = new PGpoint(t.getToken(1));
a = point2.x - point1.x;
b = point2.y - point1.y;
c = point1.y;
}
}
/**
* @param obj Object to compare with
* @return true if the two lines are identical
@ -206,6 +173,39 @@ public class PGline extends PGobject implements Serializable, Cloneable {
return isNull ? null : "{" + a + "," + b + "," + c + "}";
}
/**
* @param s Definition of the line in PostgreSQL's syntax
* @throws SQLException on conversion failure
*/
@Override
public void setValue(String s) throws SQLException {
isNull = s == null;
if (s == null) {
return;
}
if (s.trim().startsWith("{")) {
PGtokenizer t = new PGtokenizer(PGtokenizer.removeCurlyBrace(s), ',');
if (t.getSize() != 3) {
throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
PSQLState.DATA_TYPE_MISMATCH);
}
a = Double.parseDouble(t.getToken(0));
b = Double.parseDouble(t.getToken(1));
c = Double.parseDouble(t.getToken(2));
} else if (s.trim().startsWith("[")) {
PGtokenizer t = new PGtokenizer(PGtokenizer.removeBox(s), ',');
if (t.getSize() != 2) {
throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
PSQLState.DATA_TYPE_MISMATCH);
}
PGpoint point1 = new PGpoint(t.getToken(0));
PGpoint point2 = new PGpoint(t.getToken(1));
a = point2.x - point1.x;
b = point2.y - point1.y;
c = point1.y;
}
}
@Override
public Object clone() throws CloneNotSupportedException {
// squid:S2157 "Cloneables" should implement "clone

View file

@ -22,7 +22,7 @@ public class PGlseg extends PGobject implements Serializable, Cloneable {
/**
* These are the two points.
*/
public PGpoint [] point;
public PGpoint[] point;
/**
* @param x1 coordinate for first point
@ -59,30 +59,6 @@ public class PGlseg extends PGobject implements Serializable, Cloneable {
type = "lseg";
}
/**
* @param s Definition of the line segment in PostgreSQL's syntax
* @throws SQLException on conversion failure
*/
@Override
public void setValue(String s) throws SQLException {
if (s == null) {
point = null;
return;
}
PGtokenizer t = new PGtokenizer(PGtokenizer.removeBox(s), ',');
if (t.getSize() != 2) {
throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
PSQLState.DATA_TYPE_MISMATCH);
}
PGpoint[] point = this.point;
if (point == null) {
this.point = point = new PGpoint[2];
}
point[0] = new PGpoint(t.getToken(0));
point[1] = new PGpoint(t.getToken(1));
}
/**
* @param obj Object to compare with
* @return true if the two line segments are identical
@ -138,4 +114,28 @@ public class PGlseg extends PGobject implements Serializable, Cloneable {
}
return "[" + point[0] + "," + point[1] + "]";
}
/**
* @param s Definition of the line segment in PostgreSQL's syntax
* @throws SQLException on conversion failure
*/
@Override
public void setValue(String s) throws SQLException {
if (s == null) {
point = null;
return;
}
PGtokenizer t = new PGtokenizer(PGtokenizer.removeBox(s), ',');
if (t.getSize() != 2) {
throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
PSQLState.DATA_TYPE_MISMATCH);
}
PGpoint[] point = this.point;
if (point == null) {
this.point = point = new PGpoint[2];
}
point[0] = new PGpoint(t.getToken(0));
point[1] = new PGpoint(t.getToken(1));
}
}

View file

@ -27,13 +27,13 @@ public class PGpath extends PGobject implements Serializable, Cloneable {
/**
* The points defining this path.
*/
public PGpoint [] points;
public PGpoint[] points;
/**
* @param points the PGpoints that define the path
* @param open True if the path is open, false if closed
*/
public PGpath(PGpoint [] points, boolean open) {
public PGpath(PGpoint[] points, boolean open) {
this();
this.points = points;
this.open = open;
@ -55,37 +55,6 @@ public class PGpath extends PGobject implements Serializable, Cloneable {
setValue(s);
}
/**
* @param s Definition of the path in PostgreSQL's syntax
* @throws SQLException on conversion failure
*/
@Override
public void setValue(String s) throws SQLException {
if (s == null) {
points = null;
return;
}
// First test to see if we're open
if (s.startsWith("[") && s.endsWith("]")) {
open = true;
s = PGtokenizer.removeBox(s);
} else if (s.startsWith("(") && s.endsWith(")")) {
open = false;
s = PGtokenizer.removePara(s);
} else {
throw new PSQLException(GT.tr("Cannot tell if path is open or closed: {0}.", s),
PSQLState.DATA_TYPE_MISMATCH);
}
PGtokenizer t = new PGtokenizer(s, ',');
int npoints = t.getSize();
PGpoint[] points = new PGpoint[npoints];
this.points = points;
for (int p = 0; p < npoints; p++) {
points[p] = new PGpoint(t.getToken(p));
}
}
/**
* @param obj Object to compare with
* @return true if the two paths are identical
@ -151,6 +120,7 @@ public class PGpath extends PGobject implements Serializable, Cloneable {
/**
* This returns the path in the syntax expected by org.postgresql.
*
* @return the value of this object
*/
@Override
@ -172,6 +142,37 @@ public class PGpath extends PGobject implements Serializable, Cloneable {
return b.toString();
}
/**
* @param s Definition of the path in PostgreSQL's syntax
* @throws SQLException on conversion failure
*/
@Override
public void setValue(String s) throws SQLException {
if (s == null) {
points = null;
return;
}
// First test to see if we're open
if (s.startsWith("[") && s.endsWith("]")) {
open = true;
s = PGtokenizer.removeBox(s);
} else if (s.startsWith("(") && s.endsWith(")")) {
open = false;
s = PGtokenizer.removePara(s);
} else {
throw new PSQLException(GT.tr("Cannot tell if path is open or closed: {0}.", s),
PSQLState.DATA_TYPE_MISMATCH);
}
PGtokenizer t = new PGtokenizer(s, ',');
int npoints = t.getSize();
PGpoint[] points = new PGpoint[npoints];
this.points = points;
for (int p = 0; p < npoints; p++) {
points[p] = new PGpoint(t.getToken(p));
}
}
public boolean isOpen() {
return open && points != null;
}

View file

@ -67,26 +67,6 @@ public class PGpoint extends PGobject implements PGBinaryObject, Serializable, C
type = "point";
}
/**
* @param s Definition of this point in PostgreSQL's syntax
* @throws SQLException on conversion failure
*/
@Override
public void setValue(String s) throws SQLException {
isNull = s == null;
if (s == null) {
return;
}
PGtokenizer t = new PGtokenizer(PGtokenizer.removePara(s), ',');
try {
x = Double.parseDouble(t.getToken(0));
y = Double.parseDouble(t.getToken(1));
} catch (NumberFormatException e) {
throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
PSQLState.DATA_TYPE_MISMATCH, e);
}
}
/**
* @param b Definition of this point in PostgreSQL's binary syntax
*/
@ -133,6 +113,26 @@ public class PGpoint extends PGobject implements PGBinaryObject, Serializable, C
return isNull ? null : "(" + x + "," + y + ")";
}
/**
* @param s Definition of this point in PostgreSQL's syntax
* @throws SQLException on conversion failure
*/
@Override
public void setValue(String s) throws SQLException {
isNull = s == null;
if (s == null) {
return;
}
PGtokenizer t = new PGtokenizer(PGtokenizer.removePara(s), ',');
try {
x = Double.parseDouble(t.getToken(0));
y = Double.parseDouble(t.getToken(1));
} catch (NumberFormatException e) {
throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
PSQLState.DATA_TYPE_MISMATCH, e);
}
}
@Override
public int lengthInBytes() {
return isNull ? 0 : 16;

View file

@ -19,7 +19,7 @@ public class PGpolygon extends PGobject implements Serializable, Cloneable {
/**
* The points defining the polygon.
*/
public PGpoint [] points;
public PGpoint[] points;
/**
* Creates a polygon using an array of PGpoints.
@ -47,27 +47,6 @@ public class PGpolygon extends PGobject implements Serializable, Cloneable {
type = "polygon";
}
/**
* @param s Definition of the polygon in PostgreSQL's syntax
* @throws SQLException on conversion failure
*/
@Override
public void setValue(String s) throws SQLException {
if (s == null) {
points = null;
return;
}
PGtokenizer t = new PGtokenizer(PGtokenizer.removePara(s), ',');
int npoints = t.getSize();
PGpoint[] points = this.points;
if (points == null || points.length != npoints) {
this.points = points = new PGpoint[npoints];
}
for (int p = 0; p < npoints; p++) {
points[p] = new PGpoint(t.getToken(p));
}
}
/**
* @param obj Object to compare with
* @return true if the two polygons are identical
@ -148,4 +127,25 @@ public class PGpolygon extends PGobject implements Serializable, Cloneable {
b.append(")");
return b.toString();
}
/**
* @param s Definition of the polygon in PostgreSQL's syntax
* @throws SQLException on conversion failure
*/
@Override
public void setValue(String s) throws SQLException {
if (s == null) {
points = null;
return;
}
PGtokenizer t = new PGtokenizer(PGtokenizer.removePara(s), ',');
int npoints = t.getSize();
PGpoint[] points = this.points;
if (points == null || points.length != npoints) {
this.points = points = new PGpoint[npoints];
}
for (int p = 0; p < npoints; p++) {
points[p] = new PGpoint(t.getToken(p));
}
}
}
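For context on how these geometric PGobject types are used from application code, here is a minimal sketch that round-trips a point and a polygon through a temporary table; the connection settings are placeholders.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.Statement;
import org.postgresql.geometric.PGpoint;
import org.postgresql.geometric.PGpolygon;

public class GeometricSketch {
    public static void main(String[] args) throws Exception {
        // placeholder connection settings
        try (Connection c = DriverManager.getConnection(
                "jdbc:postgresql://localhost:5432/test", "test", "test")) {
            try (Statement st = c.createStatement()) {
                st.execute("CREATE TEMP TABLE shapes (p point, poly polygon)");
            }
            PGpoint point = new PGpoint(1.5, 2.5);
            PGpolygon poly = new PGpolygon(new PGpoint[]{
                new PGpoint(0, 0), new PGpoint(0, 1), new PGpoint(1, 1)});
            try (PreparedStatement ps = c.prepareStatement("INSERT INTO shapes VALUES (?, ?)")) {
                ps.setObject(1, point);
                ps.setObject(2, poly);
                ps.executeUpdate();
            }
            try (Statement st = c.createStatement();
                 ResultSet rs = st.executeQuery("SELECT p, poly FROM shapes")) {
                while (rs.next()) {
                    PGpoint p = (PGpoint) rs.getObject(1);     // setValue() parses "(x,y)"
                    PGpolygon pg = (PGpolygon) rs.getObject(2);
                    System.out.println(p.getValue() + " " + pg.getValue());
                }
            }
        }
    }
}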

View file

@ -6,7 +6,6 @@
package org.postgresql.gss;
import java.io.IOException;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.callback.NameCallback;
@ -21,9 +20,9 @@ import javax.security.auth.callback.UnsupportedCallbackException;
class GSSCallbackHandler implements CallbackHandler {
private final String user;
private final char [] password;
private final char[] password;
GSSCallbackHandler(String user, char [] password) {
GSSCallbackHandler(String user, char[] password) {
this.user = user;
this.password = password;
}

View file

@ -5,18 +5,17 @@
package org.postgresql.gss;
import java.io.IOException;
import java.io.InputStream;
import org.ietf.jgss.GSSContext;
import org.ietf.jgss.GSSException;
import org.ietf.jgss.MessageProp;
import java.io.IOException;
import java.io.InputStream;
public class GSSInputStream extends InputStream {
private final GSSContext gssContext;
private final MessageProp messageProp;
private final InputStream wrapped;
byte [] unencrypted;
byte[] unencrypted;
int unencryptedPos;
int unencryptedLength;
@ -32,18 +31,18 @@ public class GSSInputStream extends InputStream {
}
@Override
public int read(byte [] buffer, int pos, int len) throws IOException {
public int read(byte[] buffer, int pos, int len) throws IOException {
byte[] int4Buf = new byte[4];
int encryptedLength;
int copyLength = 0;
if ( unencryptedLength > 0 ) {
if (unencryptedLength > 0) {
copyLength = Math.min(len, unencryptedLength);
System.arraycopy(unencrypted, unencryptedPos, buffer, pos, copyLength);
unencryptedLength -= copyLength;
unencryptedPos += copyLength;
} else {
if (wrapped.read(int4Buf, 0, 4) == 4 ) {
if (wrapped.read(int4Buf, 0, 4) == 4) {
encryptedLength = (int4Buf[0] & 0xFF) << 24 | (int4Buf[1] & 0xFF) << 16 | (int4Buf[2] & 0xFF) << 8
| int4Buf[3] & 0xFF;

View file

@ -5,20 +5,19 @@
package org.postgresql.gss;
import java.io.IOException;
import java.io.OutputStream;
import org.ietf.jgss.GSSContext;
import org.ietf.jgss.GSSException;
import org.ietf.jgss.MessageProp;
import java.io.IOException;
import java.io.OutputStream;
public class GSSOutputStream extends OutputStream {
private final GSSContext gssContext;
private final MessageProp messageProp;
private final byte[] buffer;
private final byte[] int4Buf = new byte[4];
private int index;
private final OutputStream wrapped;
private int index;
public GSSOutputStream(OutputStream out, GSSContext gssContext, MessageProp messageProp, int bufferSize) {
wrapped = out;
@ -44,9 +43,9 @@ public class GSSOutputStream extends OutputStream {
public void write(byte[] b, int pos, int len) throws IOException {
int max;
while ( len > 0 ) {
while (len > 0) {
int roomToWrite = buffer.length - index;
if ( len < roomToWrite ) {
if (len < roomToWrite) {
System.arraycopy(b, pos, buffer, index, len);
index += len;
len -= roomToWrite;
@ -68,7 +67,7 @@ public class GSSOutputStream extends OutputStream {
sendInteger4Raw(token.length);
wrapped.write(token, 0, token.length);
index = 0;
} catch ( GSSException ex ) {
} catch (GSSException ex) {
throw new IOException(ex);
}
wrapped.flush();

View file

@ -5,19 +5,6 @@
package org.postgresql.gss;
import org.postgresql.core.PGStream;
import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
import org.postgresql.util.ServerErrorMessage;
import org.ietf.jgss.GSSContext;
import org.ietf.jgss.GSSCredential;
import org.ietf.jgss.GSSException;
import org.ietf.jgss.GSSManager;
import org.ietf.jgss.GSSName;
import org.ietf.jgss.Oid;
import java.io.IOException;
import java.security.Principal;
import java.security.PrivilegedAction;
@ -26,8 +13,18 @@ import java.util.Set;
import java.util.concurrent.Callable;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.security.auth.Subject;
import org.ietf.jgss.GSSContext;
import org.ietf.jgss.GSSCredential;
import org.ietf.jgss.GSSException;
import org.ietf.jgss.GSSManager;
import org.ietf.jgss.GSSName;
import org.ietf.jgss.Oid;
import org.postgresql.core.PGStream;
import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
import org.postgresql.util.ServerErrorMessage;
class GssAction implements PrivilegedAction<Exception>, Callable<Exception> {

View file

@ -5,18 +5,6 @@
package org.postgresql.gss;
import org.postgresql.core.PGStream;
import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
import org.ietf.jgss.GSSContext;
import org.ietf.jgss.GSSCredential;
import org.ietf.jgss.GSSException;
import org.ietf.jgss.GSSManager;
import org.ietf.jgss.GSSName;
import org.ietf.jgss.Oid;
import java.io.IOException;
import java.security.Principal;
import java.security.PrivilegedAction;
@ -25,8 +13,17 @@ import java.util.Set;
import java.util.concurrent.Callable;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.security.auth.Subject;
import org.ietf.jgss.GSSContext;
import org.ietf.jgss.GSSCredential;
import org.ietf.jgss.GSSException;
import org.ietf.jgss.GSSManager;
import org.ietf.jgss.GSSName;
import org.ietf.jgss.Oid;
import org.postgresql.core.PGStream;
import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
public class GssEncAction implements PrivilegedAction<Exception>, Callable<Exception> {
private static final Logger LOGGER = Logger.getLogger(GssAction.class.getName());

View file

@ -5,14 +5,6 @@
package org.postgresql.gss;
import org.postgresql.PGProperty;
import org.postgresql.core.PGStream;
import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
import org.ietf.jgss.GSSCredential;
import java.io.IOException;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
@ -22,9 +14,14 @@ import java.util.Set;
import java.util.concurrent.Callable;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.security.auth.Subject;
import javax.security.auth.login.LoginContext;
import org.ietf.jgss.GSSCredential;
import org.postgresql.PGProperty;
import org.postgresql.core.PGStream;
import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
public class MakeGSS {
private static final Logger LOGGER = Logger.getLogger(MakeGSS.class.getName());
@ -86,6 +83,7 @@ public class MakeGSS {
/**
* Use {@code Subject.current()} in Java 18+, and
* {@code Subject.getSubject(AccessController.getContext())} in Java before 18.
*
* @return current Subject or null
*/
@SuppressWarnings("deprecation")
@ -112,7 +110,7 @@ public class MakeGSS {
}
public static void authenticate(boolean encrypted,
PGStream pgStream, String host, String user, char [] password,
PGStream pgStream, String host, String user, char[] password,
String jaasApplicationName, String kerberosServerName,
boolean useSpnego, boolean jaasLogin,
boolean logServerErrorDetail)
@ -145,7 +143,7 @@ public class MakeGSS {
}
PrivilegedAction<Exception> action;
if ( encrypted ) {
if (encrypted) {
action = new GssEncAction(pgStream, sub, host, user,
kerberosServerName, useSpnego, logServerErrorDetail);
} else {

View file

@ -51,8 +51,6 @@ public enum HostRequirement {
}
};
public abstract boolean allowConnectingTo(HostStatus status);
/**
* <p>The PostgreSQL project has decided not to use the term slave to refer to alternate servers;
* secondary or standby is preferred. We have arbitrarily chosen secondary.
@ -73,4 +71,6 @@ public enum HostRequirement {
return valueOf(allowSlave);
}
public abstract boolean allowConnectingTo(HostStatus status);
}
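The targetServerType values handled by this enum are usually supplied on the connection URL; the sketch below shows a multi-host URL asking for a secondary if one is available. Hosts and credentials are placeholders.

import java.sql.Connection;
import java.sql.DriverManager;

public class TargetServerTypeSketch {
    public static void main(String[] args) throws Exception {
        // two placeholder hosts; the driver tries them according to targetServerType
        String url = "jdbc:postgresql://db1.example:5432,db2.example:5432/test"
            + "?targetServerType=preferSecondary&loadBalanceHosts=true&hostRecheckSeconds=10";
        try (Connection c = DriverManager.getConnection(url, "test", "test")) {
            System.out.println(c.isValid(2));
        }
    }
}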

View file

@ -55,8 +55,8 @@ class MultiHostChooser implements HostChooser {
}
private Iterator<CandidateHost> candidateIterator() {
if ( targetServerType != HostRequirement.preferSecondary
&& targetServerType != HostRequirement.preferPrimary ) {
if (targetServerType != HostRequirement.preferSecondary
&& targetServerType != HostRequirement.preferPrimary) {
return getCandidateHosts(targetServerType).iterator();
}
@ -74,7 +74,7 @@ class MultiHostChooser implements HostChooser {
List<CandidateHost> preferred = getCandidateHosts(preferredServerType);
List<CandidateHost> any = getCandidateHosts(HostRequirement.any);
if ( !preferred.isEmpty() && !any.isEmpty()
if (!preferred.isEmpty() && !any.isEmpty()
&& preferred.get(preferred.size() - 1).hostSpec.equals(any.get(0).hostSpec)) {
// When the last preferred host's hostspec is the same as the first in "any" list, there's no need
// to attempt to connect to it as "preferred"

View file

@ -26,21 +26,18 @@ import java.util.ArrayList;
*/
@SuppressWarnings("try")
public abstract class AbstractBlobClob {
protected final ResourceLock lock = new ResourceLock();
private final boolean support64bit;
private final long oid;
protected BaseConnection conn;
private LargeObject currentLo;
private boolean currentLoIsWriteable;
private final boolean support64bit;
/**
* We create separate LargeObjects for methods that use streams so they won't interfere with each
* other.
*/
private ArrayList<LargeObject> subLOs = new ArrayList<LargeObject>();
protected final ResourceLock lock = new ResourceLock();
private final long oid;
public AbstractBlobClob(BaseConnection conn, long oid) throws SQLException {
this.conn = conn;
this.oid = oid;
@ -175,36 +172,6 @@ public abstract class AbstractBlobClob {
}
}
/**
* Iterates over a large object returning byte values. Will buffer the data from the large object.
*/
private class LOIterator {
private static final int BUFFER_SIZE = 8096;
private final byte[] buffer = new byte[BUFFER_SIZE];
private int idx = BUFFER_SIZE;
private int numBytes = BUFFER_SIZE;
LOIterator(long start) throws SQLException {
getLo(false).seek((int) start);
}
public boolean hasNext() throws SQLException {
boolean result;
if (idx < numBytes) {
result = true;
} else {
numBytes = getLo(false).read(buffer, 0, BUFFER_SIZE);
idx = 0;
result = numBytes > 0;
}
return result;
}
private byte next() {
return buffer[idx++];
}
}
/**
* This is simply passing the byte value of the pattern Blob.
*
@@ -291,4 +258,34 @@ public abstract class AbstractBlobClob {
protected void addSubLO(LargeObject subLO) {
subLOs.add(subLO);
}
/**
* Iterates over a large object returning byte values. Will buffer the data from the large object.
*/
private class LOIterator {
private static final int BUFFER_SIZE = 8096;
private final byte[] buffer = new byte[BUFFER_SIZE];
private int idx = BUFFER_SIZE;
private int numBytes = BUFFER_SIZE;
LOIterator(long start) throws SQLException {
getLo(false).seek((int) start);
}
public boolean hasNext() throws SQLException {
boolean result;
if (idx < numBytes) {
result = true;
} else {
numBytes = getLo(false).read(buffer, 0, BUFFER_SIZE);
idx = 0;
result = numBytes > 0;
}
return result;
}
private byte next() {
return buffer[idx++];
}
}
}

View file

@@ -44,134 +44,6 @@ import java.util.Map;
*/
public final class ArrayDecoding {
public ArrayDecoding() {
}
/**
* Array list implementation specific for storing PG array elements. If
* {@link PgArrayList#dimensionsCount} is {@code 1}, the contents will be
* {@link String}. For all larger <i>dimensionsCount</i>, the values will be
* {@link PgArrayList} instances.
*/
@SuppressWarnings("serial")
public static final class PgArrayList extends ArrayList<Object> {
/**
* How many dimensions.
*/
int dimensionsCount = 1;
public PgArrayList() {
}
}
private interface ArrayDecoder<A extends Object> {
A createArray(int size);
Object[] createMultiDimensionalArray(int[] sizes);
boolean supportBinary();
void populateFromBinary(A array, int index, int count, ByteBuffer bytes, BaseConnection connection)
throws SQLException;
void populateFromString(A array, List<String> strings, BaseConnection connection) throws SQLException;
}
private abstract static class AbstractObjectStringArrayDecoder<A extends Object> implements ArrayDecoder<A> {
final Class<?> baseClazz;
AbstractObjectStringArrayDecoder(Class<?> baseClazz) {
this.baseClazz = baseClazz;
}
/**
* {@inheritDoc}
*/
@Override
public boolean supportBinary() {
return false;
}
@SuppressWarnings("unchecked")
@Override
public A createArray(int size) {
return (A) Array.newInstance(baseClazz, size);
}
/**
* {@inheritDoc}
*/
@Override
public Object[] createMultiDimensionalArray(int[] sizes) {
return (Object[]) Array.newInstance(baseClazz, sizes);
}
@Override
public void populateFromBinary(A arr, int index, int count, ByteBuffer bytes, BaseConnection connection)
throws SQLException {
throw new SQLFeatureNotSupportedException();
}
/**
* {@inheritDoc}
*/
@Override
public void populateFromString(A arr, List<String> strings, BaseConnection connection) throws SQLException {
final Object[] array = (Object[]) arr;
for (int i = 0, j = strings.size(); i < j; i++) {
final String stringVal = strings.get(i);
array[i] = stringVal != null ? parseValue(stringVal, connection) : null;
}
}
abstract Object parseValue(String stringVal, BaseConnection connection) throws SQLException;
}
private abstract static class AbstractObjectArrayDecoder<A extends Object> extends AbstractObjectStringArrayDecoder<A> {
AbstractObjectArrayDecoder(Class<?> baseClazz) {
super(baseClazz);
}
/**
* {@inheritDoc}
*/
@Override
public boolean supportBinary() {
return true;
}
@Override
public void populateFromBinary(A arr, int index, int count, ByteBuffer bytes, BaseConnection connection)
throws SQLException {
final Object[] array = (Object[]) arr;
// skip through to the requested index
for (int i = 0; i < index; i++) {
final int length = bytes.getInt();
if (length > 0) {
bytes.position(bytes.position() + length);
}
}
for (int i = 0; i < count; i++) {
final int length = bytes.getInt();
if (length != -1) {
array[i] = parseValue(length, bytes, connection);
} else {
// explicitly set to null for reader's clarity
array[i] = null;
}
}
}
abstract Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) throws SQLException;
}
private static final ArrayDecoder<Long[]> LONG_OBJ_ARRAY = new AbstractObjectArrayDecoder<Long[]>(Long.class) {
@Override
@@ -184,7 +56,6 @@ public final class ArrayDecoding {
return PgResultSet.toLong(stringVal);
}
};
private static final ArrayDecoder<Long[]> INT4_UNSIGNED_OBJ_ARRAY = new AbstractObjectArrayDecoder<Long[]>(
Long.class) {
@@ -198,7 +69,6 @@ public final class ArrayDecoding {
return PgResultSet.toLong(stringVal);
}
};
private static final ArrayDecoder<Integer[]> INTEGER_OBJ_ARRAY = new AbstractObjectArrayDecoder<Integer[]>(
Integer.class) {
@@ -212,7 +82,6 @@ public final class ArrayDecoding {
return PgResultSet.toInt(stringVal);
}
};
private static final ArrayDecoder<Short[]> SHORT_OBJ_ARRAY = new AbstractObjectArrayDecoder<Short[]>(Short.class) {
@Override
@@ -225,7 +94,6 @@ public final class ArrayDecoding {
return PgResultSet.toShort(stringVal);
}
};
private static final ArrayDecoder<Double[]> DOUBLE_OBJ_ARRAY = new AbstractObjectArrayDecoder<Double[]>(
Double.class) {
@@ -239,7 +107,6 @@ public final class ArrayDecoding {
return PgResultSet.toDouble(stringVal);
}
};
private static final ArrayDecoder<Float[]> FLOAT_OBJ_ARRAY = new AbstractObjectArrayDecoder<Float[]>(Float.class) {
@Override
@@ -252,7 +119,6 @@ public final class ArrayDecoding {
return PgResultSet.toFloat(stringVal);
}
};
private static final ArrayDecoder<Boolean[]> BOOLEAN_OBJ_ARRAY = new AbstractObjectArrayDecoder<Boolean[]>(
Boolean.class) {
@@ -266,7 +132,6 @@ public final class ArrayDecoding {
return BooleanTypeUtil.fromString(stringVal);
}
};
private static final ArrayDecoder<String[]> STRING_ARRAY = new AbstractObjectArrayDecoder<>(String.class) {
@Override
@@ -292,7 +157,6 @@ public final class ArrayDecoding {
return stringVal;
}
};
private static final ArrayDecoder<byte[][]> BYTE_ARRAY_ARRAY = new AbstractObjectArrayDecoder<byte[][]>(
byte[].class) {
@@ -311,7 +175,6 @@ public final class ArrayDecoding {
return PGbytea.toBytes(stringVal.getBytes(StandardCharsets.US_ASCII));
}
};
private static final ArrayDecoder<BigDecimal[]> BIG_DECIMAL_STRING_DECODER = new AbstractObjectStringArrayDecoder<BigDecimal[]>(
BigDecimal.class) {
@@ -320,7 +183,6 @@ public final class ArrayDecoding {
return PgResultSet.toBigDecimal(stringVal);
}
};
private static final ArrayDecoder<String[]> STRING_ONLY_DECODER = new AbstractObjectStringArrayDecoder<String[]>(
String.class) {
@@ -329,7 +191,6 @@ public final class ArrayDecoding {
return stringVal;
}
};
private static final ArrayDecoder<Date[]> DATE_DECODER = new AbstractObjectStringArrayDecoder<Date[]>(
Date.class) {
@@ -339,7 +200,6 @@ public final class ArrayDecoding {
return connection.getTimestampUtils().toDate(null, stringVal);
}
};
private static final ArrayDecoder<Time[]> TIME_DECODER = new AbstractObjectStringArrayDecoder<Time[]>(
Time.class) {
@@ -349,7 +209,6 @@ public final class ArrayDecoding {
return connection.getTimestampUtils().toTime(null, stringVal);
}
};
private static final ArrayDecoder<Timestamp[]> TIMESTAMP_DECODER = new AbstractObjectStringArrayDecoder<Timestamp[]>(
Timestamp.class) {
@@ -359,7 +218,6 @@ public final class ArrayDecoding {
return connection.getTimestampUtils().toTimestamp(null, stringVal);
}
};
/**
* Maps from base type oid to {@link ArrayDecoder} capable of processing
* entries.
@@ -394,74 +252,13 @@ public final class ArrayDecoding {
OID_TO_DECODER.put(Oid.TIMESTAMPTZ, TIMESTAMP_DECODER);
}
@SuppressWarnings("rawtypes")
private static final class ArrayAssistantObjectArrayDecoder extends AbstractObjectArrayDecoder {
private final ArrayAssistant arrayAssistant;
@SuppressWarnings("unchecked")
ArrayAssistantObjectArrayDecoder(ArrayAssistant arrayAssistant) {
super(arrayAssistant.baseType());
this.arrayAssistant = arrayAssistant;
}
/**
* {@inheritDoc}
*/
@Override
Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) throws SQLException {
assert bytes.hasArray();
final byte[] byteArray = bytes.array();
final int offset = bytes.arrayOffset() + bytes.position();
final Object val = arrayAssistant.buildElement(byteArray, offset, length);
bytes.position(bytes.position() + length);
return val;
}
/**
* {@inheritDoc}
*/
@Override
Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
return arrayAssistant.buildElement(stringVal);
}
}
private static final class MappedTypeObjectArrayDecoder extends AbstractObjectArrayDecoder<Object[]> {
private final String typeName;
MappedTypeObjectArrayDecoder(String baseTypeName) {
super(Object.class);
this.typeName = baseTypeName;
}
/**
* {@inheritDoc}
*/
@Override
Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) throws SQLException {
final byte[] copy = new byte[length];
bytes.get(copy);
return connection.getObject(typeName, null, copy);
}
/**
* {@inheritDoc}
*/
@Override
Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
return connection.getObject(typeName, stringVal, null);
}
public ArrayDecoding() {
}
@SuppressWarnings("unchecked")
private static <A extends Object> ArrayDecoder<A> getDecoder(int oid, BaseConnection connection) throws SQLException {
final Integer key = oid;
@SuppressWarnings("rawtypes")
final ArrayDecoder decoder = OID_TO_DECODER.get(key);
@SuppressWarnings("rawtypes") final ArrayDecoder decoder = OID_TO_DECODER.get(key);
if (decoder != null) {
return decoder;
}
@@ -488,18 +285,13 @@ public final class ArrayDecoding {
/**
* Reads binary representation of array into object model.
*
* @param index
* 1 based index of where to start on outermost array.
* @param count
* The number of items to return from outermost array (beginning at
* @param index 1 based index of where to start on outermost array.
* @param count The number of items to return from outermost array (beginning at
* <i>index</i>).
* @param bytes
* The binary representation of the array.
* @param connection
* The connection the <i>bytes</i> were retrieved from.
* @param bytes The binary representation of the array.
* @param connection The connection the <i>bytes</i> were retrieved from.
* @return The parsed array.
* @throws SQLException
* For failures encountered during parsing.
* @throws SQLException For failures encountered during parsing.
*/
@SuppressWarnings("unchecked")
public static Object readBinaryArray(int index, int count, byte[] bytes, BaseConnection connection)
@@ -510,8 +302,7 @@ public final class ArrayDecoding {
final boolean hasNulls = buffer.getInt() != 0;
final int elementOid = buffer.getInt();
@SuppressWarnings("rawtypes")
final ArrayDecoder decoder = getDecoder(elementOid, connection);
@SuppressWarnings("rawtypes") final ArrayDecoder decoder = getDecoder(elementOid, connection);
if (!decoder.supportBinary()) {
throw Driver.notImplemented(PgArray.class, "readBinaryArray(data,oid)");
@@ -581,10 +372,8 @@ public final class ArrayDecoding {
/**
* Parses the string representation of an array into a {@link PgArrayList}.
*
* @param fieldString
* The array value to parse.
* @param delim
* The delimiter character appropriate for the data type.
* @param fieldString The array value to parse.
* @param delim The delimiter character appropriate for the data type.
* @return A {@link PgArrayList} representing the parsed <i>fieldString</i>.
*/
static PgArrayList buildArrayList(String fieldString, char delim) {
@@ -711,21 +500,15 @@ public final class ArrayDecoding {
/**
* Reads {@code String} representation of array into object model.
*
* @param index
* 1 based index of where to start on outermost array.
* @param count
* The number of items to return from outermost array (beginning at
* @param index 1 based index of where to start on outermost array.
* @param count The number of items to return from outermost array (beginning at
* <i>index</i>).
* @param oid
* The oid of the base type of the array.
* @param list
* The {@code #buildArrayList(String, char) processed} string
* @param oid The oid of the base type of the array.
* @param list The {@code #buildArrayList(String, char) processed} string
* representation of an array.
* @param connection
* The connection the <i>bytes</i> were retrieved from.
* @param connection The connection the <i>bytes</i> were retrieved from.
* @return The parsed array.
* @throws SQLException
* For failures encountered during parsing.
* @throws SQLException For failures encountered during parsing.
*/
@SuppressWarnings({"unchecked", "rawtypes"})
public static Object readStringArray(int index, int count, int oid, PgArrayList list, BaseConnection connection)
@@ -788,7 +571,7 @@ public final class ArrayDecoding {
}
@SuppressWarnings({"unchecked", "rawtypes"})
private static <A extends Object> void storeStringValues(A[] array, ArrayDecoder<A> decoder, List list, int [] dimensionLengths,
private static <A extends Object> void storeStringValues(A[] array, ArrayDecoder<A> decoder, List list, int[] dimensionLengths,
int dim, BaseConnection connection) throws SQLException {
assert dim <= dimensionLengths.length - 2;
@@ -801,4 +584,192 @@ public final class ArrayDecoding {
}
}
}
private interface ArrayDecoder<A extends Object> {
A createArray(int size);
Object[] createMultiDimensionalArray(int[] sizes);
boolean supportBinary();
void populateFromBinary(A array, int index, int count, ByteBuffer bytes, BaseConnection connection)
throws SQLException;
void populateFromString(A array, List<String> strings, BaseConnection connection) throws SQLException;
}
/**
* Array list implementation specific for storing PG array elements. If
* {@link PgArrayList#dimensionsCount} is {@code 1}, the contents will be
* {@link String}. For all larger <i>dimensionsCount</i>, the values will be
* {@link PgArrayList} instances.
*/
@SuppressWarnings("serial")
public static final class PgArrayList extends ArrayList<Object> {
/**
* How many dimensions.
*/
int dimensionsCount = 1;
public PgArrayList() {
}
}
private abstract static class AbstractObjectStringArrayDecoder<A extends Object> implements ArrayDecoder<A> {
final Class<?> baseClazz;
AbstractObjectStringArrayDecoder(Class<?> baseClazz) {
this.baseClazz = baseClazz;
}
/**
* {@inheritDoc}
*/
@Override
public boolean supportBinary() {
return false;
}
@SuppressWarnings("unchecked")
@Override
public A createArray(int size) {
return (A) Array.newInstance(baseClazz, size);
}
/**
* {@inheritDoc}
*/
@Override
public Object[] createMultiDimensionalArray(int[] sizes) {
return (Object[]) Array.newInstance(baseClazz, sizes);
}
@Override
public void populateFromBinary(A arr, int index, int count, ByteBuffer bytes, BaseConnection connection)
throws SQLException {
throw new SQLFeatureNotSupportedException();
}
/**
* {@inheritDoc}
*/
@Override
public void populateFromString(A arr, List<String> strings, BaseConnection connection) throws SQLException {
final Object[] array = (Object[]) arr;
for (int i = 0, j = strings.size(); i < j; i++) {
final String stringVal = strings.get(i);
array[i] = stringVal != null ? parseValue(stringVal, connection) : null;
}
}
abstract Object parseValue(String stringVal, BaseConnection connection) throws SQLException;
}
private abstract static class AbstractObjectArrayDecoder<A extends Object> extends AbstractObjectStringArrayDecoder<A> {
AbstractObjectArrayDecoder(Class<?> baseClazz) {
super(baseClazz);
}
/**
* {@inheritDoc}
*/
@Override
public boolean supportBinary() {
return true;
}
@Override
public void populateFromBinary(A arr, int index, int count, ByteBuffer bytes, BaseConnection connection)
throws SQLException {
final Object[] array = (Object[]) arr;
// skip through to the requested index
for (int i = 0; i < index; i++) {
final int length = bytes.getInt();
if (length > 0) {
bytes.position(bytes.position() + length);
}
}
for (int i = 0; i < count; i++) {
final int length = bytes.getInt();
if (length != -1) {
array[i] = parseValue(length, bytes, connection);
} else {
// explicitly set to null for reader's clarity
array[i] = null;
}
}
}
abstract Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) throws SQLException;
}
@SuppressWarnings("rawtypes")
private static final class ArrayAssistantObjectArrayDecoder extends AbstractObjectArrayDecoder {
private final ArrayAssistant arrayAssistant;
@SuppressWarnings("unchecked")
ArrayAssistantObjectArrayDecoder(ArrayAssistant arrayAssistant) {
super(arrayAssistant.baseType());
this.arrayAssistant = arrayAssistant;
}
/**
* {@inheritDoc}
*/
@Override
Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) throws SQLException {
assert bytes.hasArray();
final byte[] byteArray = bytes.array();
final int offset = bytes.arrayOffset() + bytes.position();
final Object val = arrayAssistant.buildElement(byteArray, offset, length);
bytes.position(bytes.position() + length);
return val;
}
/**
* {@inheritDoc}
*/
@Override
Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
return arrayAssistant.buildElement(stringVal);
}
}
private static final class MappedTypeObjectArrayDecoder extends AbstractObjectArrayDecoder<Object[]> {
private final String typeName;
MappedTypeObjectArrayDecoder(String baseTypeName) {
super(Object.class);
this.typeName = baseTypeName;
}
/**
* {@inheritDoc}
*/
@Override
Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) throws SQLException {
final byte[] copy = new byte[length];
bytes.get(copy);
return connection.getObject(typeName, null, copy);
}
/**
* {@inheritDoc}
*/
@Override
Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
return connection.getObject(typeName, stringVal, null);
}
}
}
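For orientation, readBinaryArray above consumes a fixed header before any element data. A minimal sketch of that header, following the PostgreSQL binary array wire format the decoder expects; the helper class below is illustrative and not part of ArrayDecoding:

import java.nio.ByteBuffer;

final class BinaryArrayHeaderSketch {

    // Returns the per-dimension lengths; lower bounds are read and ignored here.
    static int[] readHeader(ByteBuffer buffer) {
        int dimensions = buffer.getInt();        // number of dimensions
        boolean hasNulls = buffer.getInt() != 0; // non-zero when any element is SQL NULL
        int elementOid = buffer.getInt();        // element type OID, used to pick an ArrayDecoder
        int[] dimensionLengths = new int[dimensions];
        for (int d = 0; d < dimensions; d++) {
            dimensionLengths[d] = buffer.getInt(); // element count in this dimension
            buffer.getInt();                       // lower bound (usually 1), not needed here
        }
        return dimensionLengths;
    }
}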

File diff suppressed because it is too large

View file

@@ -18,11 +18,11 @@ public enum AutoSave {
value = this.name().toLowerCase(Locale.ROOT);
}
public String value() {
return value;
}
public static AutoSave of(String value) {
return valueOf(value.toUpperCase(Locale.ROOT));
}
public String value() {
return value;
}
}
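Because value() lower-cases the constant name and of() upper-cases its argument, the two methods are inverses of each other. A tiny illustrative round trip; the constant name ALWAYS is assumed and not visible in this hunk:

AutoSave mode = AutoSave.of("always"); // resolved via valueOf("ALWAYS")
String property = mode.value();        // "always", the lower-cased constant name
assert AutoSave.of(property) == mode;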

View file

@@ -31,20 +31,19 @@ import java.util.List;
public class BatchResultHandler extends ResultHandlerBase {
private final PgStatement pgStatement;
private int resultIndex;
private final Query[] queries;
private final long[] longUpdateCounts;
private final ParameterList [] parameterLists;
private final ParameterList[] parameterLists;
private final boolean expectGeneratedKeys;
private final List<List<Tuple>> allGeneratedRows;
private int resultIndex;
private PgResultSet generatedKeys;
private int committedRows; // 0 means no rows committed. 1 means row 0 was committed, and so on
private final List<List<Tuple>> allGeneratedRows;
private List<Tuple> latestGeneratedRows;
private PgResultSet latestGeneratedKeysRs;
BatchResultHandler(PgStatement pgStatement, Query[] queries,
ParameterList [] parameterLists,
ParameterList[] parameterLists,
boolean expectGeneratedKeys) {
this.pgStatement = pgStatement;
this.queries = queries;

View file

@@ -543,6 +543,7 @@ public final class EscapedFunctions2 {
/**
* Compares two TSI intervals. It is
*
* @param a first interval to compare
* @param b second interval to compare
* @return true when both intervals are equal (case insensitive)
@@ -554,6 +555,7 @@ public final class EscapedFunctions2 {
/**
* Checks if given input starts with {@link #SQL_TSI_ROOT}
*
* @param interval input string
* @return true if interval.startsWithIgnoreCase(SQL_TSI_ROOT)
*/
@@ -673,6 +675,7 @@ public final class EscapedFunctions2 {
/**
* Appends {@code begin arg0 separator arg1 separator end} sequence to the input {@link StringBuilder}
*
* @param sb destination StringBuilder
* @param begin begin string
* @param separator separator string

View file

@@ -12,6 +12,44 @@ import org.postgresql.util.CanEstimateSize;
* This class is not meant to be used outside of pgjdbc.
*/
public class FieldMetadata implements CanEstimateSize {
final String columnName;
final String tableName;
final String schemaName;
final int nullable;
final boolean autoIncrement;
public FieldMetadata(String columnName) {
this(columnName, "", "", PgResultSetMetaData.columnNullableUnknown, false);
}
FieldMetadata(String columnName, String tableName, String schemaName, int nullable,
boolean autoIncrement) {
this.columnName = columnName;
this.tableName = tableName;
this.schemaName = schemaName;
this.nullable = nullable;
this.autoIncrement = autoIncrement;
}
@Override
public long getSize() {
return columnName.length() * 2
+ tableName.length() * 2
+ schemaName.length() * 2
+ 4L
+ 1L;
}
@Override
public String toString() {
return "FieldMetadata{"
+ "columnName='" + columnName + '\''
+ ", tableName='" + tableName + '\''
+ ", schemaName='" + schemaName + '\''
+ ", nullable=" + nullable
+ ", autoIncrement=" + autoIncrement
+ '}';
}
public static class Key {
final int tableOid;
final int positionInTable;
@@ -53,43 +91,4 @@ public class FieldMetadata implements CanEstimateSize {
+ '}';
}
}
final String columnName;
final String tableName;
final String schemaName;
final int nullable;
final boolean autoIncrement;
public FieldMetadata(String columnName) {
this(columnName, "", "", PgResultSetMetaData.columnNullableUnknown, false);
}
FieldMetadata(String columnName, String tableName, String schemaName, int nullable,
boolean autoIncrement) {
this.columnName = columnName;
this.tableName = tableName;
this.schemaName = schemaName;
this.nullable = nullable;
this.autoIncrement = autoIncrement;
}
@Override
public long getSize() {
return columnName.length() * 2
+ tableName.length() * 2
+ schemaName.length() * 2
+ 4L
+ 1L;
}
@Override
public String toString() {
return "FieldMetadata{"
+ "columnName='" + columnName + '\''
+ ", tableName='" + tableName + '\''
+ ", schemaName='" + schemaName + '\''
+ ", nullable=" + nullable
+ ", autoIncrement=" + autoIncrement
+ '}';
}
}
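getSize() above estimates two bytes per character for each of the three strings plus five bytes for the int and boolean fields. A worked example; the five-argument constructor is package-private, so this sketch would live in the same package, and the argument values are made up:

// "id" (2 chars), "person" (6) and "public" (6): 2*2 + 6*2 + 6*2 + 4 + 1 = 33
FieldMetadata md = new FieldMetadata("id", "person", "public",
        PgResultSetMetaData.columnNullableUnknown, false);
assert md.getSize() == 33L;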

View file

@@ -42,10 +42,6 @@ public enum GSSEncMode {
this.value = value;
}
public boolean requireEncryption() {
return this.compareTo(REQUIRE) >= 0;
}
public static GSSEncMode of(Properties info) throws PSQLException {
String gssEncMode = PGProperty.GSS_ENC_MODE.getOrDefault(info);
// If gssEncMode is not set, fallback to allow
@@ -62,4 +58,8 @@ public enum GSSEncMode {
PSQLState.CONNECTION_UNABLE_TO_CONNECT);
}
public boolean requireEncryption() {
return this.compareTo(REQUIRE) >= 0;
}
}
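requireEncryption() above relies on enum declaration order, since compareTo on an enum compares ordinals: it returns true for REQUIRE and for any constant declared after it. The snippet below assumes the usual pgjdbc order DISABLE, ALLOW, PREFER, REQUIRE; only REQUIRE is visible in this hunk:

assert !GSSEncMode.PREFER.requireEncryption(); // declared before REQUIRE, so the comparison is negative
assert GSSEncMode.REQUIRE.requireEncryption(); // compareTo(REQUIRE) == 0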

View file

@@ -15,8 +15,8 @@ import java.sql.Savepoint;
public class PSQLSavepoint implements Savepoint {
private boolean isValid;
private final boolean isNamed;
private boolean isValid;
private int id;
private String name;

View file

@@ -44,30 +44,25 @@ public class PgArray implements Array {
ArrayAssistantRegistry.register(Oid.UUID_ARRAY, new UUIDArrayAssistant());
}
/**
* A database connection.
*/
protected BaseConnection connection;
/**
* The OID of this field.
*/
private final int oid;
private final ResourceLock lock = new ResourceLock();
/**
* A database connection.
*/
protected BaseConnection connection;
/**
* Field value as String.
*/
protected String fieldString;
/**
* Value of field as {@link PgArrayList}. Will be initialized only once within
* {@link #buildArrayList(String)}.
*/
protected ArrayDecoding.PgArrayList arrayList;
protected byte [] fieldBytes;
private final ResourceLock lock = new ResourceLock();
protected byte[] fieldBytes;
private PgArray(BaseConnection connection, int oid) throws SQLException {
this.connection = connection;
@@ -96,12 +91,25 @@ public class PgArray implements Array {
* @param fieldBytes the array data in byte form
* @throws SQLException if something wrong happens
*/
public PgArray(BaseConnection connection, int oid, byte [] fieldBytes)
public PgArray(BaseConnection connection, int oid, byte[] fieldBytes)
throws SQLException {
this(connection, oid);
this.fieldBytes = fieldBytes;
}
public static void escapeArrayElement(StringBuilder b, String s) {
b.append('"');
for (int j = 0; j < s.length(); j++) {
char c = s.charAt(j);
if (c == '"' || c == '\\') {
b.append('\\');
}
b.append(c);
}
b.append('"');
}
private BaseConnection getConnection() {
return connection;
}
@@ -393,7 +401,7 @@ public class PgArray implements Array {
for (int i = 0; i < count; i++) {
int offset = (int) index + i;
byte[] [] t = new byte[2][0];
byte[][] t = new byte[2][0];
String v = (String) arrayList.get(offset);
t[0] = getConnection().encodeString(Integer.toString(offset + 1));
t[1] = v == null ? null : getConnection().encodeString(v);
@@ -405,7 +413,7 @@ public class PgArray implements Array {
fields[1] = new Field("VALUE", oid);
for (int i = 0; i < count; i++) {
int offset = (int) index + i;
byte[] [] t = new byte[2][0];
byte[][] t = new byte[2][0];
Object v = arrayList.get(offset);
t[0] = getConnection().encodeString(Integer.toString(offset + 1));
@@ -469,24 +477,11 @@ public class PgArray implements Array {
return b.toString();
}
public static void escapeArrayElement(StringBuilder b, String s) {
b.append('"');
for (int j = 0; j < s.length(); j++) {
char c = s.charAt(j);
if (c == '"' || c == '\\') {
b.append('\\');
}
b.append(c);
}
b.append('"');
}
public boolean isBinary() {
return fieldBytes != null;
}
public byte [] toBytes() {
public byte[] toBytes() {
return fieldBytes;
}
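escapeArrayElement above wraps the element in double quotes and backslash-escapes any embedded quote or backslash, as required inside a PostgreSQL array literal. A short usage snippet:

StringBuilder sb = new StringBuilder();
PgArray.escapeArrayElement(sb, "a\"b\\c");  // element containing a double quote and a backslash
System.out.println(sb);                     // prints "a\"b\\c" (quoted, with both characters escaped)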

View file

@@ -38,14 +38,14 @@ import java.util.Map;
class PgCallableStatement extends PgPreparedStatement implements CallableStatement {
// Used by the callablestatement style methods
private final boolean isFunction;
protected Object[] callResult;
// functionReturnType contains the user supplied value to check
// testReturn contains a modified version to make it easier to
// check the getXXX methods..
private int [] functionReturnType;
private int [] testReturn;
private int[] functionReturnType;
private int[] testReturn;
// returnTypeSet is true when a proper call to registerOutParameter has been made
private boolean returnTypeSet;
protected Object [] callResult;
private int lastIndex;
PgCallableStatement(PgConnection connection, String sql, int rsType, int rsConcurrency,
@@ -326,9 +326,9 @@ class PgCallableStatement extends PgPreparedStatement implements CallableStateme
}
@Override
public byte [] getBytes(int parameterIndex) throws SQLException {
public byte[] getBytes(int parameterIndex) throws SQLException {
Object result = checkIndex(parameterIndex, Types.VARBINARY, Types.BINARY, "Bytes");
return (byte []) result;
return (byte[]) result;
}
@Override
@@ -415,7 +415,7 @@ class PgCallableStatement extends PgPreparedStatement implements CallableStateme
PSQLState.OBJECT_NOT_IN_STATE);
}
Object [] callResult = this.callResult;
Object[] callResult = this.callResult;
if (callResult == null) {
throw new PSQLException(
GT.tr("Results cannot be retrieved from a CallableStatement before it is executed."),
@@ -815,7 +815,7 @@ class PgCallableStatement extends PgPreparedStatement implements CallableStateme
}
@Override
public void setBytes(String parameterName, byte [] x) throws SQLException {
public void setBytes(String parameterName, byte[] x) throws SQLException {
throw Driver.notImplemented(this.getClass(), "setBytes(String,byte)");
}
@@ -927,7 +927,7 @@ class PgCallableStatement extends PgPreparedStatement implements CallableStateme
}
@Override
public byte [] getBytes(String parameterName) throws SQLException {
public byte[] getBytes(String parameterName) throws SQLException {
throw Driver.notImplemented(this.getClass(), "getBytes(String)");
}

Some files were not shown because too many files have changed in this diff