) new QueryExecutorTimeZoneProvider(connection.getQueryExecutor()));
+ }
+ return timestampUtils;
+ }
+
+ /**
+ * Used by metadata functions to make all column labels upper case.
+ * Because PostgreSQL folds unquoted column names to lower case, it is easier to
+ * rewrite the field labels after the fact than to coerce the columns to upper
+ * case in the queries themselves: that would require surrounding every column
+ * with " and escaping it, making the queries even harder to read than they are now.
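+ *
+ * <p>A minimal usage sketch (illustrative; {@code rs} stands for a metadata result
+ * set assembled by the driver):</p>
+ * <pre>{@code
+ * return ((PgResultSet) rs).upperCaseFieldLabels(); // "table_cat" -> "TABLE_CAT"
+ * }</pre>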
+ * @return PgResultSet
+ */
+ protected PgResultSet upperCaseFieldLabels() {
+ for (Field field : fields) {
+ field.upperCaseLabel();
+ }
+ return this;
+ }
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PgResultSetMetaData.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PgResultSetMetaData.java
new file mode 100644
index 0000000..fe5f98e
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PgResultSetMetaData.java
@@ -0,0 +1,468 @@
+/*
+ * Copyright (c) 2004, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.jdbc;
+
+import org.postgresql.PGResultSetMetaData;
+import org.postgresql.core.BaseConnection;
+import org.postgresql.core.Field;
+import org.postgresql.core.ServerVersion;
+import org.postgresql.util.GT;
+import org.postgresql.util.Gettable;
+import org.postgresql.util.GettableHashMap;
+import org.postgresql.util.JdbcBlackHole;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
+
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.sql.Types;
+
+public class PgResultSetMetaData implements ResultSetMetaData, PGResultSetMetaData {
+ protected final BaseConnection connection;
+ protected final Field[] fields;
+
+ private boolean fieldInfoFetched;
+
+ /**
+ * Initialise for a result with a tuple set and a field descriptor set.
+ *
+ * @param connection the connection to retrieve metadata
+ * @param fields the array of field descriptors
+ */
+ public PgResultSetMetaData(BaseConnection connection, Field[] fields) {
+ this.connection = connection;
+ this.fields = fields;
+ this.fieldInfoFetched = false;
+ }
+
+ @Override
+ public int getColumnCount() throws SQLException {
+ return fields.length;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * A column is reported as auto-increment if the table metadata marks it as an
+ * identity column or its default expression calls {@code nextval(...)}.
+ *
+ * @param column the first column is 1, the second is 2...
+ * @return true if so
+ * @exception SQLException if a database access error occurs
+ */
+ @Override
+ public boolean isAutoIncrement(int column) throws SQLException {
+ fetchFieldMetaData();
+ Field field = getField(column);
+ FieldMetadata metadata = field.getMetadata();
+ return metadata != null && metadata.autoIncrement;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ *
+ * <p>Does a column's case matter? ASSUMPTION: Any field that is not obviously case insensitive is
+ * assumed to be case sensitive
+ *
+ * @param column the first column is 1, the second is 2...
+ * @return true if so
+ * @exception SQLException if a database access error occurs
+ */
+ @Override
+ public boolean isCaseSensitive(int column) throws SQLException {
+ Field field = getField(column);
+ return connection.getTypeInfo().isCaseSensitive(field.getOID());
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ *
+ * <p>Can the column be used in a WHERE clause? Basically for this, I split the functions into two
+ * types: recognised types (which are always useable), and OTHER types (which may or may not be
+ * useable). The OTHER types, for now, I will assume they are useable. We should really query the
+ * catalog to see if they are useable.
+ *
+ * @param column the first column is 1, the second is 2...
+ * @return true if they can be used in a WHERE clause
+ * @exception SQLException if a database access error occurs
+ */
+ @Override
+ public boolean isSearchable(int column) throws SQLException {
+ return true;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ *
+ * <p>Is the column a cash value? 6.1 introduced the cash/money type, which haven't been incorporated
+ * as of 970414, so I just check the type name for both 'cash' and 'money'
+ *
+ * @param column the first column is 1, the second is 2...
+ * @return true if its a cash column
+ * @exception SQLException if a database access error occurs
+ */
+ @Override
+ public boolean isCurrency(int column) throws SQLException {
+ String typeName = getPGType(column);
+
+ return "cash".equals(typeName) || "money".equals(typeName);
+ }
+
+ @Override
+ public int isNullable(int column) throws SQLException {
+ fetchFieldMetaData();
+ Field field = getField(column);
+ FieldMetadata metadata = field.getMetadata();
+ return metadata == null ? ResultSetMetaData.columnNullable : metadata.nullable;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ *
+ * <p>Is the column a signed number? In PostgreSQL, all numbers are signed, so this is trivial.
+ * However, strings are not signed (duh!)
+ *
+ * @param column the first column is 1, the second is 2...
+ * @return true if so
+ * @exception SQLException if a database access error occurs
+ */
+ @Override
+ public boolean isSigned(int column) throws SQLException {
+ Field field = getField(column);
+ return connection.getTypeInfo().isSigned(field.getOID());
+ }
+
+ @Override
+ public int getColumnDisplaySize(int column) throws SQLException {
+ Field field = getField(column);
+ return connection.getTypeInfo().getDisplaySize(field.getOID(), field.getMod());
+ }
+
+ @Override
+ public String getColumnLabel(int column) throws SQLException {
+ Field field = getField(column);
+ return field.getColumnLabel();
+ }
+
+ @Override
+ public String getColumnName(int column) throws SQLException {
+ return getColumnLabel(column);
+ }
+
+ @Override
+ public String getBaseColumnName(int column) throws SQLException {
+ Field field = getField(column);
+ if (field.getTableOid() == 0) {
+ return "";
+ }
+ fetchFieldMetaData();
+ FieldMetadata metadata = field.getMetadata();
+ return metadata == null ? "" : metadata.columnName;
+ }
+
+ @Override
+ public String getSchemaName(int column) throws SQLException {
+ return "";
+ }
+
+ private boolean populateFieldsWithMetadata(Gettable<FieldMetadata.Key, FieldMetadata> metadata) {
+ boolean allOk = true;
+ for (Field field : fields) {
+ if (field.getMetadata() != null) {
+ // No need to update metadata
+ continue;
+ }
+
+ final FieldMetadata fieldMetadata =
+ metadata.get(new FieldMetadata.Key(field.getTableOid(), field.getPositionInTable()));
+ if (fieldMetadata == null) {
+ allOk = false;
+ } else {
+ field.setMetadata(fieldMetadata);
+ }
+ }
+ fieldInfoFetched |= allOk;
+ return allOk;
+ }
+
+ private void fetchFieldMetaData() throws SQLException {
+ if (fieldInfoFetched) {
+ return;
+ }
+
+ if (populateFieldsWithMetadata(connection.getFieldMetadataCache())) {
+ return;
+ }
+
+ StringBuilder sql = new StringBuilder(
+ "SELECT c.oid, a.attnum, a.attname, c.relname, n.nspname, "
+ + "a.attnotnull OR (t.typtype = 'd' AND t.typnotnull), ");
+
+ if (connection.haveMinimumServerVersion(ServerVersion.v10)) {
+ sql.append("a.attidentity != '' OR pg_catalog.pg_get_expr(d.adbin, d.adrelid) LIKE '%nextval(%' ");
+ } else {
+ sql.append("pg_catalog.pg_get_expr(d.adbin, d.adrelid) LIKE '%nextval(%' ");
+ }
+ sql.append("FROM pg_catalog.pg_class c "
+ + "JOIN pg_catalog.pg_namespace n ON (c.relnamespace = n.oid) "
+ + "JOIN pg_catalog.pg_attribute a ON (c.oid = a.attrelid) "
+ + "JOIN pg_catalog.pg_type t ON (a.atttypid = t.oid) "
+ + "LEFT JOIN pg_catalog.pg_attrdef d ON (d.adrelid = a.attrelid AND d.adnum = a.attnum) "
+ + "JOIN (");
+
+ // 7.4 servers don't support row IN operations (a,b) IN ((c,d),(e,f))
+ // so we've got to fake that with a JOIN here.
+ //
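+ // For two requested columns the generated join target looks roughly like
+ // (oid/attnum values illustrative):
+ // JOIN (SELECT 12345 AS oid , 1 AS attnum UNION ALL SELECT 12345, 2) vals
+ // ON (c.oid = vals.oid AND a.attnum = vals.attnum)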
+ boolean hasSourceInfo = false;
+ for (Field field : fields) {
+ if (field.getMetadata() != null) {
+ continue;
+ }
+
+ if (hasSourceInfo) {
+ sql.append(" UNION ALL ");
+ }
+
+ sql.append("SELECT ");
+ sql.append(field.getTableOid());
+ if (!hasSourceInfo) {
+ sql.append(" AS oid ");
+ }
+ sql.append(", ");
+ sql.append(field.getPositionInTable());
+ if (!hasSourceInfo) {
+ sql.append(" AS attnum");
+ }
+
+ if (!hasSourceInfo) {
+ hasSourceInfo = true;
+ }
+ }
+ sql.append(") vals ON (c.oid = vals.oid AND a.attnum = vals.attnum) ");
+
+ if (!hasSourceInfo) {
+ fieldInfoFetched = true;
+ return;
+ }
+
+ Statement stmt = connection.createStatement();
+ ResultSet rs = null;
+ GettableHashMap<FieldMetadata.Key, FieldMetadata> md = new GettableHashMap<>();
+ try {
+ rs = stmt.executeQuery(sql.toString());
+ while (rs.next()) {
+ int table = (int) rs.getLong(1);
+ int column = (int) rs.getLong(2);
+ String columnName = rs.getString(3);
+ String tableName = rs.getString(4);
+ String schemaName = rs.getString(5);
+ int nullable =
+ rs.getBoolean(6) ? ResultSetMetaData.columnNoNulls : ResultSetMetaData.columnNullable;
+ boolean autoIncrement = rs.getBoolean(7);
+ FieldMetadata fieldMetadata =
+ new FieldMetadata(columnName, tableName, schemaName, nullable, autoIncrement);
+ FieldMetadata.Key key = new FieldMetadata.Key(table, column);
+ md.put(key, fieldMetadata);
+ }
+ } finally {
+ JdbcBlackHole.close(rs);
+ JdbcBlackHole.close(stmt);
+ }
+ populateFieldsWithMetadata(md);
+ connection.getFieldMetadataCache().putAll(md);
+ }
+
+ @Override
+ public String getBaseSchemaName(int column) throws SQLException {
+ fetchFieldMetaData();
+ Field field = getField(column);
+ FieldMetadata metadata = field.getMetadata();
+ return metadata == null ? "" : metadata.schemaName;
+ }
+
+ @Override
+ public int getPrecision(int column) throws SQLException {
+ Field field = getField(column);
+ return connection.getTypeInfo().getPrecision(field.getOID(), field.getMod());
+ }
+
+ @Override
+ public int getScale(int column) throws SQLException {
+ Field field = getField(column);
+ return connection.getTypeInfo().getScale(field.getOID(), field.getMod());
+ }
+
+ @Override
+ public String getTableName(int column) throws SQLException {
+ return getBaseTableName(column);
+ }
+
+ @Override
+ public String getBaseTableName(int column) throws SQLException {
+ fetchFieldMetaData();
+ Field field = getField(column);
+ FieldMetadata metadata = field.getMetadata();
+ return metadata == null ? "" : metadata.tableName;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * As with getSchemaName(), we can say that if
+ * getTableName() returns n/a, then we can too - otherwise, we need to work on it.
+ *
+ * @param column the first column is 1, the second is 2...
+ * @return catalog name, or "" if not applicable
+ * @exception SQLException if a database access error occurs
+ */
+ @Override
+ public String getCatalogName(int column) throws SQLException {
+ return "";
+ }
+
+ @Override
+ public int getColumnType(int column) throws SQLException {
+ return getSQLType(column);
+ }
+
+ @Override
+ public int getFormat(int column) throws SQLException {
+ return getField(column).getFormat();
+ }
+
+ @Override
+ public String getColumnTypeName(int column) throws SQLException {
+ String type = getPGType(column);
+ if (isAutoIncrement(column)) {
+ if ("int4".equals(type)) {
+ return "serial";
+ } else if ("int8".equals(type)) {
+ return "bigserial";
+ } else if ("int2".equals(type) && connection.haveMinimumServerVersion(ServerVersion.v9_2)) {
+ return "smallserial";
+ }
+ }
+
+ return type;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ *
+ * <p>In reality, we would have to check the GRANT/REVOKE
+ * stuff for this to be effective, and I haven't really looked into that yet, so this will get
+ * re-visited.
+ *
+ * @param column the first column is 1, the second is 2, etc.
+ * @return true if so
+ * @exception SQLException if a database access error occurs
+ */
+ @Override
+ public boolean isReadOnly(int column) throws SQLException {
+ return false;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ *
+ * <p>In reality we would have to check
+ * the GRANT/REVOKE stuff, which I haven't worked with as yet. However, if it isn't ReadOnly, then
+ * it is obviously writable.
+ *
+ * @param column the first column is 1, the second is 2, etc.
+ * @return true if so
+ * @exception SQLException if a database access error occurs
+ */
+ @Override
+ public boolean isWritable(int column) throws SQLException {
+ return !isReadOnly(column);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ *
+ * <p>Hmmm...this is a bad one, since the two preceding functions have not been
+ * really defined. "I cannot tell" is the short answer, so this conservatively
+ * returns false rather than guessing.
+ *
+ * @param column the first column is 1, the second is 2, etc..
+ * @return true if so
+ * @exception SQLException if a database access error occurs
+ */
+ @Override
+ public boolean isDefinitelyWritable(int column) throws SQLException {
+ return false;
+ }
+
+ // ********************************************************
+ // END OF PUBLIC INTERFACE
+ // ********************************************************
+
+ /**
+ * For several routines in this package, we need to convert a columnIndex into a Field[]
+ * descriptor. Rather than do the same code several times, here it is.
+ *
+ * @param columnIndex the first column is 1, the second is 2...
+ * @return the Field description
+ * @exception SQLException if a database access error occurs
+ */
+ protected Field getField(int columnIndex) throws SQLException {
+ if (columnIndex < 1 || columnIndex > fields.length) {
+ throw new PSQLException(
+ GT.tr("The column index is out of range: {0}, number of columns: {1}.",
+ columnIndex, fields.length),
+ PSQLState.INVALID_PARAMETER_VALUE);
+ }
+ return fields[columnIndex - 1];
+ }
+
+ protected String getPGType(int columnIndex) throws SQLException {
+ return connection.getTypeInfo().getPGType(getField(columnIndex).getOID());
+ }
+
+ protected int getSQLType(int columnIndex) throws SQLException {
+ return connection.getTypeInfo().getSQLType(getField(columnIndex).getOID());
+ }
+
+ // ** JDBC 2 Extensions **
+
+ // This can hook into our PG_Object mechanism
+
+ @Override
+ public String getColumnClassName(int column) throws SQLException {
+ Field field = getField(column);
+ String result = connection.getTypeInfo().getJavaClass(field.getOID());
+
+ if (result != null) {
+ return result;
+ }
+
+ int sqlType = getSQLType(column);
+ if (sqlType == Types.ARRAY) {
+ return "java.sql.Array";
+ } else {
+ String type = getPGType(column);
+ if ("unknown".equals(type)) {
+ return "java.lang.String";
+ }
+ return "java.lang.Object";
+ }
+ }
+
+ @Override
+ public boolean isWrapperFor(Class<?> iface) throws SQLException {
+ return iface.isAssignableFrom(getClass());
+ }
+
+ @Override
+ public <T> T unwrap(Class<T> iface) throws SQLException {
+ if (iface.isAssignableFrom(getClass())) {
+ return iface.cast(this);
+ }
+ throw new SQLException("Cannot unwrap to " + iface.getName());
+ }
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PgSQLXML.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PgSQLXML.java
new file mode 100644
index 0000000..a904d76
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PgSQLXML.java
@@ -0,0 +1,333 @@
+/*
+ * Copyright (c) 2004, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.jdbc;
+
+import org.postgresql.core.BaseConnection;
+import org.postgresql.util.GT;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
+import org.postgresql.xml.DefaultPGXmlFactoryFactory;
+import org.postgresql.xml.PGXmlFactoryFactory;
+
+import org.xml.sax.InputSource;
+import org.xml.sax.XMLReader;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.Reader;
+import java.io.StringReader;
+import java.io.StringWriter;
+import java.io.Writer;
+import java.sql.SQLException;
+import java.sql.SQLXML;
+
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.stream.XMLInputFactory;
+import javax.xml.stream.XMLOutputFactory;
+import javax.xml.stream.XMLStreamException;
+import javax.xml.stream.XMLStreamReader;
+import javax.xml.stream.XMLStreamWriter;
+import javax.xml.transform.Result;
+import javax.xml.transform.Source;
+import javax.xml.transform.Transformer;
+import javax.xml.transform.TransformerException;
+import javax.xml.transform.TransformerFactory;
+import javax.xml.transform.dom.DOMResult;
+import javax.xml.transform.dom.DOMSource;
+import javax.xml.transform.sax.SAXResult;
+import javax.xml.transform.sax.SAXSource;
+import javax.xml.transform.sax.SAXTransformerFactory;
+import javax.xml.transform.sax.TransformerHandler;
+import javax.xml.transform.stax.StAXResult;
+import javax.xml.transform.stax.StAXSource;
+import javax.xml.transform.stream.StreamResult;
+import javax.xml.transform.stream.StreamSource;
+
+@SuppressWarnings("try")
+public class PgSQLXML implements SQLXML {
+
+ private final ResourceLock lock = new ResourceLock();
+ private final BaseConnection conn;
+ private String data; // The actual data contained.
+ private boolean initialized; // Has someone assigned the data for this object?
+ private boolean active; // Is anyone in the process of loading data into us?
+ private boolean freed;
+
+ private ByteArrayOutputStream byteArrayOutputStream;
+ private StringWriter stringWriter;
+ private DOMResult domResult;
+
+ public PgSQLXML(BaseConnection conn) {
+ this(conn, null, false);
+ }
+
+ public PgSQLXML(BaseConnection conn, String data) {
+ this(conn, data, true);
+ }
+
+ private PgSQLXML(BaseConnection conn, String data, boolean initialized) {
+ this.conn = conn;
+ this.data = data;
+ this.initialized = initialized;
+ this.active = false;
+ this.freed = false;
+ }
+
+ private PGXmlFactoryFactory getXmlFactoryFactory() throws SQLException {
+ if (conn != null) {
+ return conn.getXmlFactoryFactory();
+ }
+ return DefaultPGXmlFactoryFactory.INSTANCE;
+ }
+
+ @Override
+ public void free() {
+ try (ResourceLock ignore = lock.obtain()) {
+ freed = true;
+ data = null;
+ }
+ }
+
+ @Override
+ public InputStream getBinaryStream() throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ checkFreed();
+ ensureInitialized();
+
+ if (data == null) {
+ return null;
+ }
+
+ try {
+ return new ByteArrayInputStream(conn.getEncoding().encode(data));
+ } catch (IOException ioe) {
+ // This should be a can't happen exception. We just
+ // decoded this data, so it would be surprising that
+ // we couldn't encode it.
+ // For this reason don't make it translatable.
+ throw new PSQLException("Failed to re-encode xml data.", PSQLState.DATA_ERROR, ioe);
+ }
+ }
+ }
+
+ @Override
+ public Reader getCharacterStream() throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ checkFreed();
+ ensureInitialized();
+
+ if (data == null) {
+ return null;
+ }
+
+ return new StringReader(data);
+ }
+ }
+
+ // We must implement this with an unchecked cast because that's what the
+ // interface requires: the caller chooses T, and the compiler cannot see that
+ // the if statements below guarantee the returned Source matches it.
+ //
+ @SuppressWarnings("unchecked")
+ @Override
+ public <T extends Source> T getSource(Class<T> sourceClass)
+ throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ checkFreed();
+ ensureInitialized();
+
+ String data = this.data;
+ if (data == null) {
+ return null;
+ }
+
+ try {
+ if (sourceClass == null || DOMSource.class.equals(sourceClass)) {
+ DocumentBuilder builder = getXmlFactoryFactory().newDocumentBuilder();
+ InputSource input = new InputSource(new StringReader(data));
+ DOMSource domSource = new DOMSource(builder.parse(input));
+ return (T) domSource;
+ } else if (SAXSource.class.equals(sourceClass)) {
+ XMLReader reader = getXmlFactoryFactory().createXMLReader();
+ InputSource is = new InputSource(new StringReader(data));
+ return sourceClass.cast(new SAXSource(reader, is));
+ } else if (StreamSource.class.equals(sourceClass)) {
+ return sourceClass.cast(new StreamSource(new StringReader(data)));
+ } else if (StAXSource.class.equals(sourceClass)) {
+ XMLInputFactory xif = getXmlFactoryFactory().newXMLInputFactory();
+ XMLStreamReader xsr = xif.createXMLStreamReader(new StringReader(data));
+ return sourceClass.cast(new StAXSource(xsr));
+ }
+ } catch (Exception e) {
+ throw new PSQLException(GT.tr("Unable to decode xml data."), PSQLState.DATA_ERROR, e);
+ }
+
+ throw new PSQLException(GT.tr("Unknown XML Source class: {0}", sourceClass),
+ PSQLState.INVALID_PARAMETER_TYPE);
+ }
+ }
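+
+ // Illustrative caller-side usage (assumes a ResultSet rs with an xml column):
+ // SQLXML xml = rs.getSQLXML(1);
+ // DOMSource dom = xml.getSource(DOMSource.class); // parsed via getXmlFactoryFactory()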
+
+ @Override
+ public String getString() throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ checkFreed();
+ ensureInitialized();
+ return data;
+ }
+ }
+
+ @Override
+ public OutputStream setBinaryStream() throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ checkFreed();
+ initialize();
+ active = true;
+ byteArrayOutputStream = new ByteArrayOutputStream();
+ return byteArrayOutputStream;
+ }
+ }
+
+ @Override
+ public Writer setCharacterStream() throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ checkFreed();
+ initialize();
+ active = true;
+ stringWriter = new StringWriter();
+ return stringWriter;
+ }
+ }
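+
+ // Illustrative write-side usage (names hypothetical; ps is a PreparedStatement):
+ // SQLXML xml = conn.createSQLXML();
+ // try (Writer w = xml.setCharacterStream()) { w.write("<doc/>"); }
+ // ps.setSQLXML(1, xml);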
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public <T extends Result> T setResult(Class<T> resultClass) throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ checkFreed();
+ initialize();
+
+ if (resultClass == null || DOMResult.class.equals(resultClass)) {
+ domResult = new DOMResult();
+ active = true;
+ return (T) domResult;
+ } else if (SAXResult.class.equals(resultClass)) {
+ try {
+ SAXTransformerFactory transformerFactory = getXmlFactoryFactory().newSAXTransformerFactory();
+ TransformerHandler transformerHandler = transformerFactory.newTransformerHandler();
+ stringWriter = new StringWriter();
+ transformerHandler.setResult(new StreamResult(stringWriter));
+ active = true;
+ return resultClass.cast(new SAXResult(transformerHandler));
+ } catch (TransformerException te) {
+ throw new PSQLException(GT.tr("Unable to create SAXResult for SQLXML."),
+ PSQLState.UNEXPECTED_ERROR, te);
+ }
+ } else if (StreamResult.class.equals(resultClass)) {
+ stringWriter = new StringWriter();
+ active = true;
+ return resultClass.cast(new StreamResult(stringWriter));
+ } else if (StAXResult.class.equals(resultClass)) {
+ StringWriter stringWriter = new StringWriter();
+ this.stringWriter = stringWriter;
+ try {
+ XMLOutputFactory xof = getXmlFactoryFactory().newXMLOutputFactory();
+ XMLStreamWriter xsw = xof.createXMLStreamWriter(stringWriter);
+ active = true;
+ return resultClass.cast(new StAXResult(xsw));
+ } catch (XMLStreamException xse) {
+ throw new PSQLException(GT.tr("Unable to create StAXResult for SQLXML"),
+ PSQLState.UNEXPECTED_ERROR, xse);
+ }
+ }
+
+ throw new PSQLException(GT.tr("Unknown XML Result class: {0}", resultClass),
+ PSQLState.INVALID_PARAMETER_TYPE);
+ }
+ }
+
+ @Override
+ public void setString(String value) throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ checkFreed();
+ initialize();
+ data = value;
+ }
+ }
+
+ private void checkFreed() throws SQLException {
+ if (freed) {
+ throw new PSQLException(GT.tr("This SQLXML object has already been freed."),
+ PSQLState.OBJECT_NOT_IN_STATE);
+ }
+ }
+
+ private void ensureInitialized() throws SQLException {
+ if (!initialized) {
+ throw new PSQLException(
+ GT.tr(
+ "This SQLXML object has not been initialized, so you cannot retrieve data from it."),
+ PSQLState.OBJECT_NOT_IN_STATE);
+ }
+
+ // Is anyone loading data into us at the moment?
+ if (!active) {
+ return;
+ }
+
+ if (byteArrayOutputStream != null) {
+ try {
+ data = conn.getEncoding().decode(byteArrayOutputStream.toByteArray());
+ } catch (IOException ioe) {
+ throw new PSQLException(GT.tr("Failed to convert binary xml data to encoding: {0}.",
+ conn.getEncoding().name()), PSQLState.DATA_ERROR, ioe);
+ } finally {
+ byteArrayOutputStream = null;
+ active = false;
+ }
+ } else if (stringWriter != null) {
+ // This is also handling the work for Stream, SAX, and StAX Results
+ // as they will use the same underlying stringWriter variable.
+ //
+ data = stringWriter.toString();
+ stringWriter = null;
+ active = false;
+ } else if (domResult != null) {
+ DOMResult domResult = this.domResult;
+ // Copy the content from the result to a source
+ // and use the identity transform to get it into a
+ // friendlier result format.
+ try {
+ TransformerFactory factory = getXmlFactoryFactory().newTransformerFactory();
+ Transformer transformer = factory.newTransformer();
+ DOMSource domSource = new DOMSource(domResult.getNode());
+ StringWriter stringWriter = new StringWriter();
+ StreamResult streamResult = new StreamResult(stringWriter);
+ transformer.transform(domSource, streamResult);
+ data = stringWriter.toString();
+ } catch (TransformerException te) {
+ throw new PSQLException(GT.tr("Unable to convert DOMResult SQLXML data to a string."),
+ PSQLState.DATA_ERROR, te);
+ } finally {
+ domResult = null;
+ active = false;
+ }
+ }
+ }
+
+ private void initialize() throws SQLException {
+ if (initialized) {
+ throw new PSQLException(
+ GT.tr(
+ "This SQLXML object has already been initialized, so you cannot manipulate it further."),
+ PSQLState.OBJECT_NOT_IN_STATE);
+ }
+ initialized = true;
+ }
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PgStatement.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PgStatement.java
new file mode 100644
index 0000000..e27c6ad
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PgStatement.java
@@ -0,0 +1,1372 @@
+/*
+ * Copyright (c) 2004, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.jdbc;
+
+import org.postgresql.Driver;
+import org.postgresql.core.BaseConnection;
+import org.postgresql.core.BaseStatement;
+import org.postgresql.core.CachedQuery;
+import org.postgresql.core.Field;
+import org.postgresql.core.ParameterList;
+import org.postgresql.core.Query;
+import org.postgresql.core.QueryExecutor;
+import org.postgresql.core.ResultCursor;
+import org.postgresql.core.ResultHandlerBase;
+import org.postgresql.core.SqlCommand;
+import org.postgresql.core.Tuple;
+import org.postgresql.util.GT;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.SQLWarning;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.TimerTask;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+
+@SuppressWarnings("try")
+public class PgStatement implements Statement, BaseStatement {
+ private static final String[] NO_RETURNING_COLUMNS = new String[0];
+
+ /**
+ * Default state for whether to force binary transfers. For testing purposes only.
+ */
+ private static final boolean DEFAULT_FORCE_BINARY_TRANSFERS =
+ Boolean.getBoolean("org.postgresql.forceBinary");
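+ // (e.g. run the test JVM with -Dorg.postgresql.forceBinary=true to flip this default)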
+ // Only for testing purposes: even one-shot statements will use binary transfers.
+ private boolean forceBinaryTransfers = DEFAULT_FORCE_BINARY_TRANSFERS;
+
+ protected final ResourceLock lock = new ResourceLock();
+ protected ArrayList<Query> batchStatements;
+ protected ArrayList<ParameterList> batchParameters;
+ protected final int resultsettype; // the resultset type to return (ResultSet.TYPE_xxx)
+ protected final int concurrency; // is it updateable or not? (ResultSet.CONCUR_xxx)
+ private final int rsHoldability;
+ private boolean poolable;
+ private boolean closeOnCompletion;
+ protected int fetchdirection = ResultSet.FETCH_FORWARD;
+ // fetch direction hint (currently ignored)
+
+ /**
+ * Protects current statement from cancelTask starting, waiting for a bit, and waking up exactly
+ * on subsequent query execution. The idea is to atomically compare and swap the reference to the
+ * task, so the task can detect that statement executes different query than the one the
+ * cancelTask was created. Note: the field must be set/get/compareAndSet via
+ * {@link #CANCEL_TIMER_UPDATER} as per {@link AtomicReferenceFieldUpdater} javadoc.
+ */
+ private volatile TimerTask cancelTimerTask;
+
+ private static final AtomicReferenceFieldUpdater<PgStatement, TimerTask> CANCEL_TIMER_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(
+ PgStatement.class, TimerTask.class, "cancelTimerTask");
+
+ /**
+ * Protects statement from out-of-order cancels. It protects from both
+ * {@link #setQueryTimeout(int)} and {@link #cancel()} induced ones.
+ *
+ * {@link #execute(String)} and friends change the field to
+ * {@link StatementCancelState#IN_QUERY} during execute. {@link #cancel()}
+ * ignores cancel request if state is {@link StatementCancelState#IDLE}.
+ * In case {@link #execute(String)} observes non-{@link StatementCancelState#IDLE} state as it
+ * completes the query, it waits till {@link StatementCancelState#CANCELLED}. Note: the field must be
+ * set/get/compareAndSet via {@link #STATE_UPDATER} as per {@link AtomicReferenceFieldUpdater}
+ * javadoc.
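+ *
+ * <p>Informal lifecycle sketch: IDLE -> IN_QUERY (execution starts) -> CANCELING
+ * (cancel() wins the compare-and-set) -> CANCELLED (cancel request sent) -> IDLE
+ * (killTimerTask observes CANCELLED and resets the state).</p>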
+ */
+ private volatile StatementCancelState statementState = StatementCancelState.IDLE;
+
+ private static final AtomicReferenceFieldUpdater<PgStatement, StatementCancelState> STATE_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(PgStatement.class, StatementCancelState.class, "statementState");
+
+ /**
+ * Does the caller of execute/executeUpdate want generated keys for this execution? This is set by
+ * Statement methods that have generated keys arguments and cleared after execution is complete.
+ */
+ protected boolean wantsGeneratedKeysOnce;
+
+ /**
+ * Was this PreparedStatement created to return generated keys for every execution? This is set at
+ * creation time and never cleared by execution.
+ */
+ public boolean wantsGeneratedKeysAlways;
+
+ // The connection who created us
+ protected final PgConnection connection;
+
+ /**
+ * The warnings chain.
+ */
+ protected volatile PSQLWarningWrapper warnings;
+
+ /**
+ * Maximum number of rows to return, 0 = unlimited.
+ */
+ protected int maxrows;
+
+ /**
+ * Number of rows to get in a batch.
+ */
+ protected int fetchSize;
+
+ /**
+ * Timeout (in milliseconds) for a query.
+ */
+ protected long timeout;
+
+ protected boolean replaceProcessingEnabled = true;
+
+ /**
+ * The current results.
+ */
+ protected ResultWrapper result;
+
+ /**
+ * The first unclosed result.
+ */
+ protected ResultWrapper firstUnclosedResult;
+
+ /**
+ * Results returned by a statement that wants generated keys.
+ */
+ protected ResultWrapper generatedKeys;
+
+ protected int mPrepareThreshold; // Reuse threshold to enable use of PREPARE
+
+ protected int maxFieldSize;
+
+ protected boolean adaptiveFetch;
+
+ private TimestampUtils timestampUtils; // our own Object because it's not thread safe
+
+ PgStatement(PgConnection c, int rsType, int rsConcurrency, int rsHoldability)
+ throws SQLException {
+ this.connection = c;
+ forceBinaryTransfers |= c.getForceBinary();
+ // validation check for allowed values of resultset type
+ if (rsType != ResultSet.TYPE_FORWARD_ONLY && rsType != ResultSet.TYPE_SCROLL_INSENSITIVE && rsType != ResultSet.TYPE_SCROLL_SENSITIVE) {
+ throw new PSQLException(GT.tr("Unknown value for ResultSet type"),
+ PSQLState.INVALID_PARAMETER_VALUE);
+ }
+ resultsettype = rsType;
+ // validation check for allowed values of resultset concurrency
+ if (rsConcurrency != ResultSet.CONCUR_READ_ONLY && rsConcurrency != ResultSet.CONCUR_UPDATABLE) {
+ throw new PSQLException(GT.tr("Unknown value for ResultSet concurrency"),
+ PSQLState.INVALID_PARAMETER_VALUE);
+ }
+ concurrency = rsConcurrency;
+ setFetchSize(c.getDefaultFetchSize());
+ setPrepareThreshold(c.getPrepareThreshold());
+ setAdaptiveFetch(c.getAdaptiveFetch());
+ // validation check for allowed values of resultset holdability
+ if (rsHoldability != ResultSet.HOLD_CURSORS_OVER_COMMIT && rsHoldability != ResultSet.CLOSE_CURSORS_AT_COMMIT) {
+ throw new PSQLException(GT.tr("Unknown value for ResultSet holdability"),
+ PSQLState.INVALID_PARAMETER_VALUE);
+ }
+ this.rsHoldability = rsHoldability;
+ }
+
+ @Override
+ public ResultSet createResultSet(Query originalQuery, Field[] fields, List<Tuple> tuples,
+ ResultCursor cursor) throws SQLException {
+ PgResultSet newResult = new PgResultSet(originalQuery, this, fields, tuples, cursor,
+ getMaxRows(), getMaxFieldSize(), getResultSetType(), getResultSetConcurrency(),
+ getResultSetHoldability(), getAdaptiveFetch());
+ newResult.setFetchSize(getFetchSize());
+ newResult.setFetchDirection(getFetchDirection());
+ return newResult;
+ }
+
+ public BaseConnection getPGConnection() {
+ return connection;
+ }
+
+ public String getFetchingCursorName() {
+ return null;
+ }
+
+ @Override
+ public int getFetchSize() {
+ return fetchSize;
+ }
+
+ protected boolean wantsScrollableResultSet() {
+ return resultsettype != ResultSet.TYPE_FORWARD_ONLY;
+ }
+
+ protected boolean wantsHoldableResultSet() {
+ // FIXME: false if not supported
+ return rsHoldability == ResultSet.HOLD_CURSORS_OVER_COMMIT;
+ }
+
+ /**
+ * ResultHandler implementations for updates, queries, and either-or.
+ */
+ public class StatementResultHandler extends ResultHandlerBase {
+ private ResultWrapper results;
+ private ResultWrapper lastResult;
+
+ public StatementResultHandler() {
+ }
+
+ ResultWrapper getResults() {
+ return results;
+ }
+
+ private void append(ResultWrapper newResult) {
+ if (results == null) {
+ lastResult = results = newResult;
+ } else {
+ lastResult.append(newResult);
+ }
+ }
+
+ @Override
+ public void handleResultRows(Query fromQuery, Field[] fields, List<Tuple> tuples,
+ ResultCursor cursor) {
+ try {
+ ResultSet rs = PgStatement.this.createResultSet(fromQuery, fields, tuples, cursor);
+ append(new ResultWrapper(rs));
+ } catch (SQLException e) {
+ handleError(e);
+ }
+ }
+
+ @Override
+ public void handleCommandStatus(String status, long updateCount, long insertOID) {
+ append(new ResultWrapper(updateCount, insertOID));
+ }
+
+ @Override
+ public void handleWarning(SQLWarning warning) {
+ PgStatement.this.addWarning(warning);
+ }
+
+ }
+
+ @Override
+ public ResultSet executeQuery(String sql) throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ if (!executeWithFlags(sql, 0)) {
+ throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA);
+ }
+
+ return getSingleResultSet();
+ }
+ }
+
+ protected ResultSet getSingleResultSet() throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ checkClosed();
+ ResultWrapper result = this.result;
+ if (result.getNext() != null) {
+ throw new PSQLException(GT.tr("Multiple ResultSets were returned by the query."),
+ PSQLState.TOO_MANY_RESULTS);
+ }
+
+ return result.getResultSet();
+ }
+ }
+
+ @Override
+ public int executeUpdate(String sql) throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ executeWithFlags(sql, QueryExecutor.QUERY_NO_RESULTS);
+ checkNoResultUpdate();
+ return getUpdateCount();
+ }
+ }
+
+ protected final void checkNoResultUpdate() throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ checkClosed();
+ ResultWrapper iter = result;
+ while (iter != null) {
+ if (iter.getResultSet() != null) {
+ throw new PSQLException(GT.tr("A result was returned when none was expected."),
+ PSQLState.TOO_MANY_RESULTS);
+ }
+ iter = iter.getNext();
+ }
+ }
+ }
+
+ @Override
+ public boolean execute(String sql) throws SQLException {
+ return executeWithFlags(sql, 0);
+ }
+
+ @Override
+ public boolean executeWithFlags(String sql, int flags) throws SQLException {
+ return executeCachedSql(sql, flags, NO_RETURNING_COLUMNS);
+ }
+
+ private boolean executeCachedSql(String sql, int flags,
+ String[] columnNames) throws SQLException {
+ PreferQueryMode preferQueryMode = connection.getPreferQueryMode();
+ // Simple statements should not replace ?, ? with $1, $2
+ boolean shouldUseParameterized = false;
+ QueryExecutor queryExecutor = connection.getQueryExecutor();
+ Object key = queryExecutor
+ .createQueryKey(sql, replaceProcessingEnabled, shouldUseParameterized, columnNames);
+ CachedQuery cachedQuery;
+ boolean shouldCache = preferQueryMode == PreferQueryMode.EXTENDED_CACHE_EVERYTHING;
+ if (shouldCache) {
+ cachedQuery = queryExecutor.borrowQueryByKey(key);
+ } else {
+ cachedQuery = queryExecutor.createQueryByKey(key);
+ }
+ if (wantsGeneratedKeysOnce) {
+ SqlCommand sqlCommand = cachedQuery.query.getSqlCommand();
+ wantsGeneratedKeysOnce = sqlCommand != null && sqlCommand.isReturningKeywordPresent();
+ }
+ boolean res;
+ try {
+ res = executeWithFlags(cachedQuery, flags);
+ } finally {
+ if (shouldCache) {
+ queryExecutor.releaseQuery(cachedQuery);
+ }
+ }
+ return res;
+ }
+
+ @Override
+ public boolean executeWithFlags(CachedQuery simpleQuery, int flags) throws SQLException {
+ checkClosed();
+ if (connection.getPreferQueryMode().compareTo(PreferQueryMode.EXTENDED) < 0) {
+ flags |= QueryExecutor.QUERY_EXECUTE_AS_SIMPLE;
+ }
+ execute(simpleQuery, null, flags);
+ try (ResourceLock ignore = lock.obtain()) {
+ checkClosed();
+ return result != null && result.getResultSet() != null;
+ }
+ }
+
+ @Override
+ public boolean executeWithFlags(int flags) throws SQLException {
+ checkClosed();
+ throw new PSQLException(GT.tr("Can''t use executeWithFlags(int) on a Statement."),
+ PSQLState.WRONG_OBJECT_TYPE);
+ }
+
+ /*
+ If there are multiple result sets we close any that have been processed and left open
+ by the client.
+ */
+ private void closeUnclosedProcessedResults() throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ ResultWrapper resultWrapper = this.firstUnclosedResult;
+ ResultWrapper currentResult = this.result;
+ for (; resultWrapper != currentResult && resultWrapper != null;
+ resultWrapper = resultWrapper.getNext()) {
+ PgResultSet rs = (PgResultSet) resultWrapper.getResultSet();
+ if (rs != null) {
+ rs.closeInternally();
+ }
+ }
+ firstUnclosedResult = resultWrapper;
+ }
+ }
+
+ protected void closeForNextExecution() throws SQLException {
+
+ // Every statement execution clears any previous warnings.
+ clearWarnings();
+
+ // Close any existing resultsets associated with this statement.
+ try (ResourceLock ignore = lock.obtain()) {
+ closeUnclosedProcessedResults();
+
+ if (this.result != null && this.result.getResultSet() != null) {
+ this.result.getResultSet().close();
+ }
+ result = null;
+
+ ResultWrapper generatedKeys = this.generatedKeys;
+ if (generatedKeys != null) {
+ ResultSet resultSet = generatedKeys.getResultSet();
+ if (resultSet != null) {
+ resultSet.close();
+ }
+ this.generatedKeys = null;
+ }
+ }
+ }
+
+ /**
+ * Returns true if query is unlikely to be reused.
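+ *
+ * <p>For example, with {@code prepareThreshold == 5} the first four executions of a cached
+ * query are one-shot and the fifth switches to a named server-prepared statement; forcing
+ * binary transfers disables one-shot mode entirely.</p>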
+ *
+ * @param cachedQuery to check (null if current query)
+ * @return true if query is unlikely to be reused
+ */
+ protected boolean isOneShotQuery(CachedQuery cachedQuery) {
+ if (cachedQuery == null) {
+ return true;
+ }
+ cachedQuery.increaseExecuteCount();
+ return (mPrepareThreshold == 0 || cachedQuery.getExecuteCount() < mPrepareThreshold)
+ && !getForceBinaryTransfer();
+ }
+
+ protected final void execute(CachedQuery cachedQuery,
+ ParameterList queryParameters, int flags)
+ throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ try {
+ executeInternal(cachedQuery, queryParameters, flags);
+ } catch (SQLException e) {
+ // Don't retry composite queries as it might get partially executed
+ if (cachedQuery.query.getSubqueries() != null
+ || !connection.getQueryExecutor().willHealOnRetry(e)) {
+ throw e;
+ }
+ cachedQuery.query.close();
+ // Execute the query one more time
+ executeInternal(cachedQuery, queryParameters, flags);
+ }
+ }
+ }
+
+ private void executeInternal(CachedQuery cachedQuery,
+ ParameterList queryParameters, int flags)
+ throws SQLException {
+ closeForNextExecution();
+
+ // Enable cursor-based resultset if possible.
+ if (fetchSize > 0 && !wantsScrollableResultSet() && !connection.getAutoCommit()
+ && !wantsHoldableResultSet()) {
+ flags |= QueryExecutor.QUERY_FORWARD_CURSOR;
+ }
+
+ if (wantsGeneratedKeysOnce || wantsGeneratedKeysAlways) {
+ flags |= QueryExecutor.QUERY_BOTH_ROWS_AND_STATUS;
+
+ // If the no results flag is set (from executeUpdate)
+ // clear it so we get the generated keys results.
+ //
+ if ((flags & QueryExecutor.QUERY_NO_RESULTS) != 0) {
+ flags &= ~(QueryExecutor.QUERY_NO_RESULTS);
+ }
+ }
+
+ // Only use named statements after we hit the threshold. Note that only
+ // named statements can be transferred in binary format.
+ // isOneShotQuery will check to see if we have hit the prepareThreshold count
+
+ if (isOneShotQuery(cachedQuery)) {
+ flags |= QueryExecutor.QUERY_ONESHOT;
+ }
+
+ if (connection.getAutoCommit()) {
+ flags |= QueryExecutor.QUERY_SUPPRESS_BEGIN;
+ }
+ if (connection.hintReadOnly()) {
+ flags |= QueryExecutor.QUERY_READ_ONLY_HINT;
+ }
+
+ // updateable result sets do not yet support binary updates
+ if (concurrency != ResultSet.CONCUR_READ_ONLY) {
+ flags |= QueryExecutor.QUERY_NO_BINARY_TRANSFER;
+ }
+
+ Query queryToExecute = cachedQuery.query;
+
+ if (queryToExecute.isEmpty()) {
+ flags |= QueryExecutor.QUERY_SUPPRESS_BEGIN;
+ }
+
+ if (!queryToExecute.isStatementDescribed() && forceBinaryTransfers
+ && (flags & QueryExecutor.QUERY_EXECUTE_AS_SIMPLE) == 0) {
+ // Simple 'Q' execution does not need to know parameter types
+ // When binaryTransfer is forced, then we need to know resulting parameter and column types,
+ // thus sending a describe request.
+ int flags2 = flags | QueryExecutor.QUERY_DESCRIBE_ONLY;
+ StatementResultHandler handler2 = new StatementResultHandler();
+ connection.getQueryExecutor().execute(queryToExecute, queryParameters, handler2, 0, 0,
+ flags2);
+ ResultWrapper result2 = handler2.getResults();
+ if (result2 != null) {
+ result2.getResultSet().close();
+ }
+ }
+
+ StatementResultHandler handler = new StatementResultHandler();
+ try (ResourceLock ignore = lock.obtain()) {
+ result = null;
+ }
+ try {
+ startTimer();
+ connection.getQueryExecutor().execute(queryToExecute, queryParameters, handler, maxrows,
+ fetchSize, flags, adaptiveFetch);
+ } finally {
+ killTimerTask();
+ }
+ try (ResourceLock ignore = lock.obtain()) {
+ checkClosed();
+
+ ResultWrapper currentResult = handler.getResults();
+ result = firstUnclosedResult = currentResult;
+
+ if (wantsGeneratedKeysOnce || wantsGeneratedKeysAlways) {
+ generatedKeys = currentResult;
+ result = currentResult.getNext();
+
+ if (wantsGeneratedKeysOnce) {
+ wantsGeneratedKeysOnce = false;
+ }
+ }
+ }
+ }
+
+ @Override
+ public void setCursorName(String name) throws SQLException {
+ checkClosed();
+ // No-op.
+ }
+
+ private volatile int isClosed;
+ private static final AtomicIntegerFieldUpdater<PgStatement> IS_CLOSED_UPDATER =
+ AtomicIntegerFieldUpdater.newUpdater(
+ PgStatement.class, "isClosed");
+
+ @Override
+ public int getUpdateCount() throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ checkClosed();
+ if (result == null || result.getResultSet() != null) {
+ return -1;
+ }
+
+ long count = result.getUpdateCount();
+ return count > Integer.MAX_VALUE ? Statement.SUCCESS_NO_INFO : (int) count;
+ }
+ }
+
+ @Override
+ public boolean getMoreResults() throws SQLException {
+ return getMoreResults(CLOSE_ALL_RESULTS);
+ }
+
+ @Override
+ public int getMaxRows() throws SQLException {
+ checkClosed();
+ return maxrows;
+ }
+
+ @Override
+ public void setMaxRows(int max) throws SQLException {
+ checkClosed();
+ if (max < 0) {
+ throw new PSQLException(
+ GT.tr("Maximum number of rows must be a value greater than or equal to 0."),
+ PSQLState.INVALID_PARAMETER_VALUE);
+ }
+ maxrows = max;
+ }
+
+ @Override
+ public void setEscapeProcessing(boolean enable) throws SQLException {
+ checkClosed();
+ replaceProcessingEnabled = enable;
+ }
+
+ @Override
+ public int getQueryTimeout() throws SQLException {
+ checkClosed();
+ long seconds = timeout / 1000;
+ if (seconds >= Integer.MAX_VALUE) {
+ return Integer.MAX_VALUE;
+ }
+ return (int) seconds;
+ }
+
+ @Override
+ public void setQueryTimeout(int seconds) throws SQLException {
+ setQueryTimeoutMs(seconds * 1000L);
+ }
+
+ /**
+ * The queryTimeout limit is the number of milliseconds the driver will wait for a Statement to
+ * execute. If the limit is exceeded, a SQLException is thrown.
+ *
+ * @return the current query timeout limit in milliseconds; 0 = unlimited
+ * @throws SQLException if a database access error occurs
+ */
+ public long getQueryTimeoutMs() throws SQLException {
+ checkClosed();
+ return timeout;
+ }
+
+ /**
+ * Sets the queryTimeout limit.
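+ *
+ * <p>{@link #setQueryTimeout(int)} delegates here, so for example {@code setQueryTimeout(5)}
+ * is equivalent to {@code setQueryTimeoutMs(5000L)}.</p>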
+ *
+ * @param millis - the new query timeout limit in milliseconds
+ * @throws SQLException if a database access error occurs
+ */
+ public void setQueryTimeoutMs(long millis) throws SQLException {
+ checkClosed();
+
+ if (millis < 0) {
+ throw new PSQLException(GT.tr("Query timeout must be a value greater than or equals to 0."),
+ PSQLState.INVALID_PARAMETER_VALUE);
+ }
+ timeout = millis;
+ }
+
+ /**
+ * Either initializes new warning wrapper, or adds warning onto the chain.
+ *
+ * Although warnings are expected to be added sequentially, the warnings chain may be cleared
+ * concurrently at any time via {@link #clearWarnings()}, therefore it is possible that a warning
+ * added via this method is placed onto the end of the previous warning chain
+ *
+ * @param warn warning to add
+ */
+ public void addWarning(SQLWarning warn) {
+ //copy reference to avoid NPE from concurrent modification of this.warnings
+ final PSQLWarningWrapper warnWrap = this.warnings;
+ if (warnWrap == null) {
+ this.warnings = new PSQLWarningWrapper(warn);
+ } else {
+ warnWrap.addWarning(warn);
+ }
+ }
+
+ @Override
+ public SQLWarning getWarnings() throws SQLException {
+ checkClosed();
+ //copy reference to avoid NPE from concurrent modification of this.warnings
+ final PSQLWarningWrapper warnWrap = this.warnings;
+ return warnWrap != null ? warnWrap.getFirstWarning() : null;
+ }
+
+ @Override
+ public int getMaxFieldSize() throws SQLException {
+ return maxFieldSize;
+ }
+
+ @Override
+ public void setMaxFieldSize(int max) throws SQLException {
+ checkClosed();
+ if (max < 0) {
+ throw new PSQLException(
+ GT.tr("The maximum field size must be a value greater than or equal to 0."),
+ PSQLState.INVALID_PARAMETER_VALUE);
+ }
+ maxFieldSize = max;
+ }
+
+ /**
+ * Clears the warning chain.
+ * Note that while it is safe to clear warnings while a query is executing, warnings that are
+ * added between calls to {@link #getWarnings()} and {@link #clearWarnings()} may be missed.
+ * Therefore you should hold a reference to the tail of the previous warning chain
+ * and check whether its {@link SQLWarning#getNextWarning()} value holds any new warnings.
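+ *
+ * <pre>{@code
+ * // Sketch of that pattern (illustrative; stmt is any Statement):
+ * SQLWarning tail = stmt.getWarnings();
+ * while (tail != null && tail.getNextWarning() != null) {
+ *   tail = tail.getNextWarning();
+ * }
+ * stmt.clearWarnings();
+ * SQLWarning missed = tail == null ? null : tail.getNextWarning();
+ * }</pre>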
+ */
+ @Override
+ public void clearWarnings() throws SQLException {
+ warnings = null;
+ }
+
+ @Override
+ public ResultSet getResultSet() throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ checkClosed();
+
+ if (result == null) {
+ return null;
+ }
+
+ return result.getResultSet();
+ }
+ }
+
+ /**
+ * Note: even though {@code Statement} is automatically closed when it is garbage
+ * collected, it is better to close it explicitly to lower resource consumption.
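+ *
+ * <pre>{@code
+ * // Preferred: try-with-resources closes the statement automatically (illustrative):
+ * try (Statement st = conn.createStatement()) {
+ *   st.execute("SELECT 1");
+ * }
+ * }</pre>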
+ *
+ * {@inheritDoc}
+ */
+ @Override
+ public final void close() throws SQLException {
+ // closing an already closed Statement is a no-op.
+ if (!IS_CLOSED_UPDATER.compareAndSet(this, 0, 1)) {
+ return;
+ }
+
+ cancel();
+
+ closeForNextExecution();
+
+ closeImpl();
+ }
+
+ /**
+ * This is guaranteed to be called exactly once even in case of concurrent {@link #close()} calls.
+ * @throws SQLException in case of error
+ */
+ protected void closeImpl() throws SQLException {
+ }
+
+ /*
+ *
+ * The following methods are postgres extensions and are defined in the interface BaseStatement
+ *
+ */
+
+ @Override
+ public long getLastOID() throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ checkClosed();
+ if (result == null) {
+ return 0;
+ }
+ return result.getInsertOID();
+ }
+ }
+
+ @Override
+ public void setPrepareThreshold(int newThreshold) throws SQLException {
+ checkClosed();
+
+ if (newThreshold < 0) {
+ forceBinaryTransfers = true;
+ newThreshold = 1;
+ }
+
+ this.mPrepareThreshold = newThreshold;
+ }
+
+ @Override
+ public int getPrepareThreshold() {
+ return mPrepareThreshold;
+ }
+
+ @Override
+ @SuppressWarnings("deprecation")
+ public void setUseServerPrepare(boolean flag) throws SQLException {
+ setPrepareThreshold(flag ? 1 : 0);
+ }
+
+ @Override
+ public boolean isUseServerPrepare() {
+ return false;
+ }
+
+ protected void checkClosed() throws SQLException {
+ if (isClosed()) {
+ throw new PSQLException(GT.tr("This statement has been closed."),
+ PSQLState.OBJECT_NOT_IN_STATE);
+ }
+ }
+
+ // ** JDBC 2 Extensions **
+
+ @Override
+ public void addBatch(String sql) throws SQLException {
+ checkClosed();
+
+ ArrayList<Query> batchStatements = this.batchStatements;
+ if (batchStatements == null) {
+ this.batchStatements = batchStatements = new ArrayList<>();
+ }
+ ArrayList<ParameterList> batchParameters = this.batchParameters;
+ if (batchParameters == null) {
+ this.batchParameters = batchParameters = new ArrayList<>();
+ }
+
+ // Simple statements should not replace ?, ? with $1, $2
+ boolean shouldUseParameterized = false;
+ CachedQuery cachedQuery = connection.createQuery(sql, replaceProcessingEnabled, shouldUseParameterized);
+ batchStatements.add(cachedQuery.query);
+ batchParameters.add(null);
+ }
+
+ @Override
+ public void clearBatch() throws SQLException {
+ if (batchStatements != null) {
+ batchStatements.clear();
+ }
+ if (batchParameters != null) {
+ batchParameters.clear();
+ }
+ }
+
+ protected BatchResultHandler createBatchHandler(Query[] queries,
+ ParameterList[] parameterLists) {
+ return new BatchResultHandler(this, queries, parameterLists,
+ wantsGeneratedKeysAlways);
+ }
+
+ private BatchResultHandler internalExecuteBatch() throws SQLException {
+ // Construct query/parameter arrays.
+ transformQueriesAndParameters();
+ ArrayList<Query> batchStatements = this.batchStatements;
+ ArrayList<ParameterList> batchParameters = this.batchParameters;
+ // Empty arrays should be passed to toArray
+ // see http://shipilev.net/blog/2016/arrays-wisdom-ancients/
+ Query[] queries = batchStatements.toArray(new Query[0]);
+ ParameterList[] parameterLists = batchParameters.toArray(new ParameterList[0]);
+ batchStatements.clear();
+ batchParameters.clear();
+
+ int flags;
+
+ // Force a Describe before any execution? We need to do this if we're going
+ // to send anything dependent on the Describe results, e.g. binary parameters.
+ boolean preDescribe = false;
+
+ if (wantsGeneratedKeysAlways) {
+ /*
+ * This batch will return generated keys, tell the executor to expect result rows. We also
+ * force a Describe later so we know the size of the results to expect.
+ *
+ * If the parameter type(s) change between batch entries and the default binary-mode changes
+ * we might get mixed binary and text in a single result set column, which we cannot handle.
+ * To prevent this, disable binary transfer mode in batches that return generated keys. See
+ * GitHub issue #267
+ */
+ flags = QueryExecutor.QUERY_BOTH_ROWS_AND_STATUS | QueryExecutor.QUERY_NO_BINARY_TRANSFER;
+ } else {
+ // If a batch hasn't specified that it wants generated keys, using the appropriate
+ // Connection.createStatement(...) interfaces, disallow any result set.
+ flags = QueryExecutor.QUERY_NO_RESULTS;
+ }
+
+ PreferQueryMode preferQueryMode = connection.getPreferQueryMode();
+ if (preferQueryMode == PreferQueryMode.SIMPLE
+ || (preferQueryMode == PreferQueryMode.EXTENDED_FOR_PREPARED
+ && parameterLists[0] == null)) {
+ flags |= QueryExecutor.QUERY_EXECUTE_AS_SIMPLE;
+ }
+
+ boolean sameQueryAhead = queries.length > 1 && queries[0] == queries[1];
+
+ if (!sameQueryAhead
+ // If executing the same query twice in a batch, make sure the statement
+ // is server-prepared. In other words, "oneshot" only if the query is one in the batch
+ // or the queries are different
+ || isOneShotQuery(null)) {
+ flags |= QueryExecutor.QUERY_ONESHOT;
+ } else {
+ // If a batch requests generated keys and isn't already described,
+ // force a Describe of the query before proceeding. That way we can
+ // determine the appropriate size of each batch by estimating the
+ // maximum data returned. Without that, we don't know how many queries
+ // we'll be able to queue up before we risk a deadlock.
+ // (see v3.QueryExecutorImpl's MAX_BUFFERED_RECV_BYTES)
+
+ // SameQueryAhead is just a quick way to issue pre-describe for batch execution
+ // TODO: It should be reworked into "pre-describe if query has unknown parameter
+ // types and same query is ahead".
+ preDescribe = (wantsGeneratedKeysAlways || sameQueryAhead)
+ && !queries[0].isStatementDescribed();
+ /*
+ * It's also necessary to force a Describe on the first execution of the new statement, even
+ * though we already described it, to work around bug #267.
+ */
+ flags |= QueryExecutor.QUERY_FORCE_DESCRIBE_PORTAL;
+ }
+
+ if (connection.getAutoCommit()) {
+ flags |= QueryExecutor.QUERY_SUPPRESS_BEGIN;
+ }
+ if (connection.hintReadOnly()) {
+ flags |= QueryExecutor.QUERY_READ_ONLY_HINT;
+ }
+
+ BatchResultHandler handler;
+ handler = createBatchHandler(queries, parameterLists);
+
+ if ((preDescribe || forceBinaryTransfers)
+ && (flags & QueryExecutor.QUERY_EXECUTE_AS_SIMPLE) == 0) {
+ // Do a client-server round trip, parsing and describing the query so we
+ // can determine its result types for use in binary parameters, batch sizing,
+ // etc.
+ int flags2 = flags | QueryExecutor.QUERY_DESCRIBE_ONLY;
+ StatementResultHandler handler2 = new StatementResultHandler();
+ try {
+ connection.getQueryExecutor().execute(queries[0], parameterLists[0], handler2, 0, 0, flags2);
+ } catch (SQLException e) {
+ // Unable to parse the first statement -> throw BatchUpdateException
+ handler.handleError(e);
+ handler.handleCompletion();
+ // Will not reach here (see above)
+ }
+ ResultWrapper result2 = handler2.getResults();
+ if (result2 != null) {
+ result2.getResultSet().close();
+ }
+ }
+
+ try (ResourceLock ignore = lock.obtain()) {
+ result = null;
+ }
+
+ try {
+ startTimer();
+ connection.getQueryExecutor().execute(queries, parameterLists, handler, maxrows, fetchSize,
+ flags, adaptiveFetch);
+ } finally {
+ killTimerTask();
+ // There might be some rows generated even in case of failures
+ try (ResourceLock ignore = lock.obtain()) {
+ checkClosed();
+ if (wantsGeneratedKeysAlways) {
+ generatedKeys = new ResultWrapper(handler.getGeneratedKeys());
+ }
+ }
+ }
+ return handler;
+ }
+
+ @Override
+ public int[] executeBatch() throws SQLException {
+ checkClosed();
+ closeForNextExecution();
+
+ if (batchStatements == null || batchStatements.isEmpty() || batchParameters == null) {
+ return new int[0];
+ }
+
+ return internalExecuteBatch().getUpdateCount();
+ }
+
+ @Override
+ public void cancel() throws SQLException {
+ if (statementState == StatementCancelState.IDLE) {
+ return;
+ }
+ if (!STATE_UPDATER.compareAndSet(this, StatementCancelState.IN_QUERY,
+ StatementCancelState.CANCELING)) {
+ // Not in query, there's nothing to cancel
+ return;
+ }
+ // Use connection lock to avoid spinning in killTimerTask
+ try (ResourceLock connectionLock = connection.obtainLock()) {
+ try {
+ connection.cancelQuery();
+ } finally {
+ STATE_UPDATER.set(this, StatementCancelState.CANCELLED);
+ connection.lockCondition().signalAll(); // wake-up killTimerTask
+ }
+ }
+ }
+
+ @Override
+ public Connection getConnection() throws SQLException {
+ return connection;
+ }
+
+ @Override
+ public int getFetchDirection() {
+ return fetchdirection;
+ }
+
+ @Override
+ public int getResultSetConcurrency() {
+ return concurrency;
+ }
+
+ @Override
+ public int getResultSetType() {
+ return resultsettype;
+ }
+
+ @Override
+ public void setFetchDirection(int direction) throws SQLException {
+ switch (direction) {
+ case ResultSet.FETCH_FORWARD:
+ case ResultSet.FETCH_REVERSE:
+ case ResultSet.FETCH_UNKNOWN:
+ fetchdirection = direction;
+ break;
+ default:
+ throw new PSQLException(GT.tr("Invalid fetch direction constant: {0}.", direction),
+ PSQLState.INVALID_PARAMETER_VALUE);
+ }
+ }
+
+ @Override
+ public void setFetchSize(int rows) throws SQLException {
+ checkClosed();
+ if (rows < 0) {
+ throw new PSQLException(GT.tr("Fetch size must be a value greater to or equal to 0."),
+ PSQLState.INVALID_PARAMETER_VALUE);
+ }
+ fetchSize = rows;
+ }
+
+ private void startTimer() {
+ /*
+ * there shouldn't be any previous timer active, but better safe than sorry.
+ */
+ cleanupTimer();
+
+ STATE_UPDATER.set(this, StatementCancelState.IN_QUERY);
+
+ if (timeout == 0) {
+ return;
+ }
+
+ TimerTask cancelTask = new StatementCancelTimerTask(this);
+
+ CANCEL_TIMER_UPDATER.set(this, cancelTask);
+ connection.addTimerTask(cancelTask, timeout);
+ }
+
+ void cancelIfStillNeeded(TimerTask timerTask) {
+ try {
+ if (!CANCEL_TIMER_UPDATER.compareAndSet(this, timerTask, null)) {
+ // Nothing to do here, statement has already finished and cleared
+ // cancelTimerTask reference
+ return;
+ }
+ cancel();
+ } catch (SQLException e) {
+ }
+ }
+
+ /**
+ * Clears {@link #cancelTimerTask} if any. Returns true if and only if "cancel" timer task would
+ * never invoke {@link #cancel()}.
+ */
+ private boolean cleanupTimer() {
+ TimerTask timerTask = CANCEL_TIMER_UPDATER.get(this);
+ if (timerTask == null) {
+ // If timeout is zero, then timer task did not exist, so we safely report "all clear"
+ return timeout == 0;
+ }
+ if (!CANCEL_TIMER_UPDATER.compareAndSet(this, timerTask, null)) {
+ // Failed to update reference -> timer has just fired, so we must wait for the query state to
+ // become "cancelling".
+ return false;
+ }
+ timerTask.cancel();
+ connection.purgeTimerTasks();
+ // All clear
+ return true;
+ }
+
+ private void killTimerTask() {
+ boolean timerTaskIsClear = cleanupTimer();
+ // The order is important here: in case we need to wait for the cancel task, the state must be
+ // kept StatementCancelState.IN_QUERY, so cancelTask would be able to cancel the query.
+ // It is believed that this case is very rare, so "additional cancel and wait below" would not
+ // harm it.
+ if (timerTaskIsClear && STATE_UPDATER.compareAndSet(this, StatementCancelState.IN_QUERY, StatementCancelState.IDLE)) {
+ return;
+ }
+
+ // Being here means someone managed to call .cancel() and our connection did not receive
+ // "timeout error"
+ // We wait till state becomes "cancelled"
+ boolean interrupted = false;
+ try (ResourceLock connectionLock = connection.obtainLock()) {
+ // state check is performed with connection lock so it detects "cancelled" state faster
+ // In other words, it prevents unnecessary ".wait()" call
+ while (!STATE_UPDATER.compareAndSet(this, StatementCancelState.CANCELLED, StatementCancelState.IDLE)) {
+ try {
+ // Note: wait timeout here is irrelevant since connection.obtainLock() would block until
+ // .cancel finishes
+ connection.lockCondition().await(10, TimeUnit.MILLISECONDS);
+ } catch (InterruptedException e) { // NOSONAR
+ // Either re-interrupt this method or rethrow the "InterruptedException"
+ interrupted = true;
+ }
+ }
+ }
+ if (interrupted) {
+ Thread.currentThread().interrupt();
+ }
+ }
+
+ protected boolean getForceBinaryTransfer() {
+ return forceBinaryTransfers;
+ }
+
+ @Override
+ public long getLargeUpdateCount() throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ checkClosed();
+ if (result == null || result.getResultSet() != null) {
+ return -1;
+ }
+
+ return result.getUpdateCount();
+ }
+ }
+
+ @Override
+ public void setLargeMaxRows(long max) throws SQLException {
+ throw Driver.notImplemented(this.getClass(), "setLargeMaxRows");
+ }
+
+ @Override
+ public long getLargeMaxRows() throws SQLException {
+ throw Driver.notImplemented(this.getClass(), "getLargeMaxRows");
+ }
+
+ @Override
+ public long[] executeLargeBatch() throws SQLException {
+ checkClosed();
+ closeForNextExecution();
+
+ if (batchStatements == null || batchStatements.isEmpty() || batchParameters == null) {
+ return new long[0];
+ }
+
+ return internalExecuteBatch().getLargeUpdateCount();
+ }
+
+ @Override
+ public long executeLargeUpdate(String sql) throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ executeWithFlags(sql, QueryExecutor.QUERY_NO_RESULTS);
+ checkNoResultUpdate();
+ return getLargeUpdateCount();
+ }
+ }
+
+ @Override
+ public long executeLargeUpdate(String sql, int autoGeneratedKeys) throws SQLException {
+ if (autoGeneratedKeys == Statement.NO_GENERATED_KEYS) {
+ return executeLargeUpdate(sql);
+ }
+
+ return executeLargeUpdate(sql, (String[]) null);
+ }
+
+ @Override
+ public long executeLargeUpdate(String sql, int[] columnIndexes) throws SQLException {
+ if (columnIndexes == null || columnIndexes.length == 0) {
+ return executeLargeUpdate(sql);
+ }
+
+ throw new PSQLException(GT.tr("Returning autogenerated keys by column index is not supported."),
+ PSQLState.NOT_IMPLEMENTED);
+ }
+
+ @Override
+ public long executeLargeUpdate(String sql, String [] columnNames) throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ if (columnNames != null && columnNames.length == 0) {
+ return executeLargeUpdate(sql);
+ }
+
+ wantsGeneratedKeysOnce = true;
+ if (!executeCachedSql(sql, 0, columnNames)) {
+        // no result set was returned; fall through and report the update count
+ }
+ return getLargeUpdateCount();
+ }
+ }
+
+ @Override
+ public boolean isClosed() throws SQLException {
+ return isClosed == 1;
+ }
+
+ @Override
+ public void setPoolable(boolean poolable) throws SQLException {
+ checkClosed();
+ this.poolable = poolable;
+ }
+
+ @Override
+ public boolean isPoolable() throws SQLException {
+ checkClosed();
+ return poolable;
+ }
+
+ @Override
+ public boolean isWrapperFor(Class> iface) throws SQLException {
+ return iface.isAssignableFrom(getClass());
+ }
+
+ @Override
+  public <T> T unwrap(Class<T> iface) throws SQLException {
+ if (iface.isAssignableFrom(getClass())) {
+ return iface.cast(this);
+ }
+ throw new SQLException("Cannot unwrap to " + iface.getName());
+ }
+
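
A sketch (outside this patch) of using unwrap to reach pgjdbc-specific statement API; it assumes the org.postgresql.PGStatement interface, which exposes the prepare threshold:

    import java.sql.Statement;

    import org.postgresql.PGStatement;

    public class UnwrapSketch {
      static void tune(Statement st) throws Exception {
        if (st.isWrapperFor(PGStatement.class)) {
          PGStatement pgSt = st.unwrap(PGStatement.class);
          pgSt.setPrepareThreshold(1); // use a server-prepared plan immediately
        }
      }
    }
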
+ @Override
+ public void closeOnCompletion() throws SQLException {
+ closeOnCompletion = true;
+ }
+
+ @Override
+ public boolean isCloseOnCompletion() throws SQLException {
+ return closeOnCompletion;
+ }
+
+ protected void checkCompletion() throws SQLException {
+ if (!closeOnCompletion) {
+ return;
+ }
+
+ try (ResourceLock ignore = lock.obtain()) {
+ ResultWrapper result = firstUnclosedResult;
+ while (result != null) {
+ ResultSet resultSet = result.getResultSet();
+ if (resultSet != null && !resultSet.isClosed()) {
+ return;
+ }
+ result = result.getNext();
+ }
+ }
+
+    // prevent ResultSet.close() calls arising from Statement.close() from looping back here
+ closeOnCompletion = false;
+ try {
+ close();
+ } finally {
+      // restore the flag in case callers rely on isCloseOnCompletion()
+ closeOnCompletion = true;
+ }
+ }
+
+ @Override
+ public boolean getMoreResults(int current) throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ checkClosed();
+ // CLOSE_CURRENT_RESULT
+ if (current == Statement.CLOSE_CURRENT_RESULT && result != null
+ && result.getResultSet() != null) {
+ result.getResultSet().close();
+ }
+
+ // Advance resultset.
+ if (result != null) {
+ result = result.getNext();
+ }
+
+ // CLOSE_ALL_RESULTS
+ if (current == Statement.CLOSE_ALL_RESULTS) {
+ // Close preceding resultsets.
+ closeUnclosedProcessedResults();
+ }
+
+ // Done.
+ return result != null && result.getResultSet() != null;
+ }
+ }
+
+ @Override
+ public ResultSet getGeneratedKeys() throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ checkClosed();
+ if (generatedKeys == null || generatedKeys.getResultSet() == null) {
+ return createDriverResultSet(new Field[0], new ArrayList<>());
+ }
+
+ return generatedKeys.getResultSet();
+ }
+ }
+
+ @Override
+ public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException {
+ if (autoGeneratedKeys == Statement.NO_GENERATED_KEYS) {
+ return executeUpdate(sql);
+ }
+
+ return executeUpdate(sql, (String[]) null);
+ }
+
+ @Override
+ public int executeUpdate(String sql, int[] columnIndexes) throws SQLException {
+ if (columnIndexes == null || columnIndexes.length == 0) {
+ return executeUpdate(sql);
+ }
+
+ throw new PSQLException(GT.tr("Returning autogenerated keys by column index is not supported."),
+ PSQLState.NOT_IMPLEMENTED);
+ }
+
+ @Override
+ public int executeUpdate(String sql, String [] columnNames) throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ if (columnNames != null && columnNames.length == 0) {
+ return executeUpdate(sql);
+ }
+
+ wantsGeneratedKeysOnce = true;
+ if (!executeCachedSql(sql, 0, columnNames)) {
+        // no result set was returned; fall through and report the update count
+ }
+ return getUpdateCount();
+ }
+ }
+
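
A sketch (outside this patch) of the generated-keys flow these overloads feed; the table and column are hypothetical, and pgjdbc serves the keys by appending a RETURNING clause:

    import java.sql.ResultSet;
    import java.sql.Statement;

    public class GeneratedKeysSketch {
      static long insertReturningId(Statement st) throws Exception {
        st.executeUpdate("INSERT INTO t(a) VALUES (1)", new String[] {"id"});
        try (ResultSet keys = st.getGeneratedKeys()) {
          keys.next();
          return keys.getLong("id"); // requested columns come back as a result set
        }
      }
    }
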
+ @Override
+ public boolean execute(String sql, int autoGeneratedKeys) throws SQLException {
+ if (autoGeneratedKeys == Statement.NO_GENERATED_KEYS) {
+ return execute(sql);
+ }
+ return execute(sql, (String[]) null);
+ }
+
+ @Override
+ public boolean execute(String sql, int [] columnIndexes) throws SQLException {
+ if (columnIndexes != null && columnIndexes.length == 0) {
+ return execute(sql);
+ }
+
+ throw new PSQLException(GT.tr("Returning autogenerated keys by column index is not supported."),
+ PSQLState.NOT_IMPLEMENTED);
+ }
+
+ @Override
+ public boolean execute(String sql, String [] columnNames) throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ if (columnNames != null && columnNames.length == 0) {
+ return execute(sql);
+ }
+
+ wantsGeneratedKeysOnce = true;
+ return executeCachedSql(sql, 0, columnNames);
+ }
+ }
+
+ @Override
+ public int getResultSetHoldability() throws SQLException {
+ return rsHoldability;
+ }
+
+ @Override
+  public ResultSet createDriverResultSet(Field[] fields, List<Tuple> tuples)
+ throws SQLException {
+ return createResultSet(null, fields, tuples, null);
+ }
+
+ protected void transformQueriesAndParameters() throws SQLException {
+ }
+
+ @Override
+ public void setAdaptiveFetch(boolean adaptiveFetch) {
+ this.adaptiveFetch = adaptiveFetch;
+ }
+
+ @Override
+ public boolean getAdaptiveFetch() {
+ return adaptiveFetch;
+ }
+
+ protected TimestampUtils getTimestampUtils() {
+ if (timestampUtils == null) {
+ timestampUtils = new TimestampUtils(!connection.getQueryExecutor().getIntegerDateTimes(), new QueryExecutorTimeZoneProvider(connection.getQueryExecutor()));
+ }
+ return timestampUtils;
+ }
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PreferQueryMode.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PreferQueryMode.java
new file mode 100644
index 0000000..6526a1d
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PreferQueryMode.java
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.jdbc;
+
+/**
+ * Specifies which mode is used to execute queries against the database: simple means 'Q' execute
+ * (no parse, no bind, text mode only); extended means bind/execute messages are always used;
+ * extendedForPrepared means the extended protocol is used for prepared statements only; and
+ * extendedCacheEverything means the extended protocol is used with every statement cached.
+ *
+ * Note: this is for debugging purposes only.
+ *
+ * @see org.postgresql.PGProperty#PREFER_QUERY_MODE
+ */
+public enum PreferQueryMode {
+ SIMPLE("simple"),
+ EXTENDED_FOR_PREPARED("extendedForPrepared"),
+ EXTENDED("extended"),
+ EXTENDED_CACHE_EVERYTHING("extendedCacheEverything");
+
+ private final String value;
+
+ PreferQueryMode(String value) {
+ this.value = value;
+ }
+
+ public static PreferQueryMode of(String mode) {
+ for (PreferQueryMode preferQueryMode : values()) {
+ if (preferQueryMode.value.equals(mode)) {
+ return preferQueryMode;
+ }
+ }
+ return EXTENDED;
+ }
+
+ public String value() {
+ return value;
+ }
+}
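
A minimal sketch of how this enum maps the preferQueryMode connection property; the property lookup is illustrative:

    import java.util.Properties;

    public class PreferQueryModeSketch {
      public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("preferQueryMode", "simple");
        PreferQueryMode mode = PreferQueryMode.of(props.getProperty("preferQueryMode"));
        System.out.println(mode);                    // SIMPLE
        System.out.println(PreferQueryMode.of("?")); // unknown values fall back to EXTENDED
      }
    }
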
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/QueryExecutorTimeZoneProvider.java b/pgjdbc/src/main/java/org/postgresql/jdbc/QueryExecutorTimeZoneProvider.java
new file mode 100644
index 0000000..b2b4ff2
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/QueryExecutorTimeZoneProvider.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2021, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.jdbc;
+
+import org.postgresql.core.Provider;
+import org.postgresql.core.QueryExecutor;
+import org.postgresql.util.GT;
+
+import java.util.TimeZone;
+
+/**
+ * This class works around an exception thrown when indexing guava-30.0-jre:
+ * it looks like {@code jandex} does not support {@code new Interface<..>} with type annotations.
+ */
+class QueryExecutorTimeZoneProvider implements Provider<TimeZone> {
+ private final QueryExecutor queryExecutor;
+
+ QueryExecutorTimeZoneProvider(QueryExecutor queryExecutor) {
+ this.queryExecutor = queryExecutor;
+ }
+
+ @Override
+ public TimeZone get() {
+ TimeZone timeZone = queryExecutor.getTimeZone();
+ if (timeZone == null) {
+ throw new IllegalStateException(
+ GT.tr("Backend timezone is not known. Backend should have returned TimeZone when "
+ + "establishing a connection")
+ );
+ }
+ return timeZone;
+ }
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/ResourceLock.java b/pgjdbc/src/main/java/org/postgresql/jdbc/ResourceLock.java
new file mode 100644
index 0000000..19ec31c
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/ResourceLock.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2004, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.jdbc;
+
+import java.util.concurrent.locks.ReentrantLock;
+
+/**
+ * Extends a ReentrantLock for use in try-with-resources block.
+ *
+ * Example use
+ * {@code
+ *
+ * try (ResourceLock ignore = lock.obtain()) {
+ * // do something while holding the resource lock
+ * }
+ *
+ * }
+ */
+@SuppressWarnings("serial")
+public final class ResourceLock extends ReentrantLock implements AutoCloseable {
+
+ public ResourceLock() {
+ }
+
+ /**
+ * Obtain a lock and return the ResourceLock for use in try-with-resources block.
+ */
+ public ResourceLock obtain() {
+ lock();
+ return this;
+ }
+
+ /**
+ * Unlock on exit of try-with-resources block.
+ */
+ @Override
+ public void close() {
+ this.unlock();
+ }
+}
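
Beyond plain locking, the class inherits ReentrantLock's condition support, which killTimerTask in PgStatement relies on; a minimal sketch:

    import java.util.concurrent.locks.Condition;

    public class ResourceLockSketch {
      public static void main(String[] args) {
        ResourceLock lock = new ResourceLock();
        Condition done = lock.newCondition(); // inherited from ReentrantLock
        try (ResourceLock ignore = lock.obtain()) {
          done.signalAll(); // must hold the lock; close() unlocks on scope exit
        }
      }
    }
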
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/ResultWrapper.java b/pgjdbc/src/main/java/org/postgresql/jdbc/ResultWrapper.java
new file mode 100644
index 0000000..df79ae7
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/ResultWrapper.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2004, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+// Copyright (c) 2004, Open Cloud Limited.
+
+package org.postgresql.jdbc;
+
+import java.sql.ResultSet;
+
+/**
+ * Helper class that stores result info. This handles both the ResultSet and no-ResultSet result
+ * cases with a single interface for inspecting and stepping through them.
+ *
+ * @author Oliver Jowett (oliver@opencloud.com)
+ */
+public class ResultWrapper {
+ public ResultWrapper(ResultSet rs) {
+ this.rs = rs;
+ this.updateCount = -1;
+ this.insertOID = -1;
+ }
+
+ public ResultWrapper(long updateCount, long insertOID) {
+ this.rs = null;
+ this.updateCount = updateCount;
+ this.insertOID = insertOID;
+ }
+
+ public ResultSet getResultSet() {
+ return rs;
+ }
+
+ public long getUpdateCount() {
+ return updateCount;
+ }
+
+ public long getInsertOID() {
+ return insertOID;
+ }
+
+ public ResultWrapper getNext() {
+ return next;
+ }
+
+ public void append(ResultWrapper newResult) {
+ ResultWrapper tail = this;
+ while (tail.next != null) {
+ tail = tail.next;
+ }
+
+ tail.next = newResult;
+ }
+
+ private final ResultSet rs;
+ private final long updateCount;
+ private final long insertOID;
+ private ResultWrapper next;
+}
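
A sketch of how these wrappers chain when a statement produces several results; the ResultSet argument is a hypothetical placeholder:

    public class ResultWrapperSketch {
      static void walk(java.sql.ResultSet someResultSet) {
        ResultWrapper first = new ResultWrapper(5L, -1L); // update count 5, no OID
        first.append(new ResultWrapper(someResultSet));   // linked after the first result
        for (ResultWrapper r = first; r != null; r = r.getNext()) {
          boolean isRowResult = r.getResultSet() != null; // false for update counts
        }
      }
    }
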
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/SslMode.java b/pgjdbc/src/main/java/org/postgresql/jdbc/SslMode.java
new file mode 100644
index 0000000..addea00
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/SslMode.java
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2018, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.jdbc;
+
+import org.postgresql.PGProperty;
+import org.postgresql.util.GT;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
+
+import java.util.Properties;
+
+public enum SslMode {
+ /**
+ * Do not use encrypted connections.
+ */
+ DISABLE("disable"),
+ /**
+ * Start with non-encrypted connection, then try encrypted one.
+ */
+ ALLOW("allow"),
+ /**
+ * Start with encrypted connection, fallback to non-encrypted (default).
+ */
+ PREFER("prefer"),
+ /**
+ * Ensure connection is encrypted.
+ */
+ REQUIRE("require"),
+ /**
+ * Ensure connection is encrypted, and client trusts server certificate.
+ */
+ VERIFY_CA("verify-ca"),
+ /**
+ * Ensure connection is encrypted, client trusts server certificate, and server hostname matches
+ * the one listed in the server certificate.
+ */
+ VERIFY_FULL("verify-full"),
+ ;
+
+ public static final SslMode[] VALUES = values();
+
+ public final String value;
+
+ SslMode(String value) {
+ this.value = value;
+ }
+
+ public boolean requireEncryption() {
+ return this.compareTo(REQUIRE) >= 0;
+ }
+
+ public boolean verifyCertificate() {
+ return this == VERIFY_CA || this == VERIFY_FULL;
+ }
+
+ public boolean verifyPeerName() {
+ return this == VERIFY_FULL;
+ }
+
+ public static SslMode of(Properties info) throws PSQLException {
+ String sslmode = PGProperty.SSL_MODE.getOrDefault(info);
+ // If sslmode is not set, fallback to ssl parameter
+ if (sslmode == null) {
+ if (PGProperty.SSL.getBoolean(info) || "".equals(PGProperty.SSL.getOrDefault(info))) {
+ return VERIFY_FULL;
+ }
+ return PREFER;
+ }
+
+ for (SslMode sslMode : VALUES) {
+ if (sslMode.value.equalsIgnoreCase(sslmode)) {
+ return sslMode;
+ }
+ }
+ throw new PSQLException(GT.tr("Invalid sslmode value: {0}", sslmode),
+ PSQLState.CONNECTION_UNABLE_TO_CONNECT);
+ }
+}
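
A minimal sketch of resolving the mode from connection properties via the sslmode key this enum reads:

    import java.util.Properties;

    public class SslModeSketch {
      public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.setProperty("sslmode", "verify-full");
        SslMode mode = SslMode.of(props);
        System.out.println(mode.requireEncryption()); // true: REQUIRE and above
        System.out.println(mode.verifyPeerName());    // true: only VERIFY_FULL checks the hostname
      }
    }
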
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/StatementCancelState.java b/pgjdbc/src/main/java/org/postgresql/jdbc/StatementCancelState.java
new file mode 100644
index 0000000..f149048
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/StatementCancelState.java
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2004, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.jdbc;
+
+/**
+ * Represents {@link PgStatement#cancel()} state.
+ */
+enum StatementCancelState {
+ IDLE,
+ IN_QUERY,
+ CANCELING,
+ CANCELLED
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/StatementCancelTimerTask.java b/pgjdbc/src/main/java/org/postgresql/jdbc/StatementCancelTimerTask.java
new file mode 100644
index 0000000..5da4624
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/StatementCancelTimerTask.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2023, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.jdbc;
+
+import java.util.TimerTask;
+
+/**
+ * Timer task that sends {@code statement.cancel()} signal to support {@link java.sql.Statement#setQueryTimeout(int)}.
+ * We explicitly nullify the reference to statement to help GC since {@code java.util.TimerThread}
+ * might keep reference to the latest executed task in its local variable.
+ */
+class StatementCancelTimerTask extends TimerTask {
+ private PgStatement statement;
+
+ StatementCancelTimerTask(PgStatement statement) {
+ this.statement = statement;
+ }
+
+ @Override
+ public boolean cancel() {
+ boolean result = super.cancel();
+ // Help GC to avoid keeping reference via TimerThread -> TimerTask -> statement -> connection
+ statement = null;
+ return result;
+ }
+
+ @Override
+ public void run() {
+ PgStatement statement = this.statement;
+ if (statement != null) {
+ statement.cancelIfStillNeeded(this);
+ }
+ // Help GC to avoid keeping reference via TimerThread -> TimerTask -> statement -> connection
+ this.statement = null;
+ }
+}
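
A sketch of the scheduling pattern this task supports; since the types are package-private it assumes code living in org.postgresql.jdbc, and the timeout is illustrative:

    import java.util.Timer;

    class CancelTaskSketch {
      static void schedule(PgStatement stmt, Timer timer) {
        StatementCancelTimerTask task = new StatementCancelTimerTask(stmt);
        timer.schedule(task, 5_000L);
        // If the statement finishes first, cancelling the task also drops the
        // statement reference so TimerThread cannot retain it via its last task.
        task.cancel();
      }
    }
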
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/TimestampUtils.java b/pgjdbc/src/main/java/org/postgresql/jdbc/TimestampUtils.java
new file mode 100644
index 0000000..57b1597
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/TimestampUtils.java
@@ -0,0 +1,1716 @@
+/*
+ * Copyright (c) 2003, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.jdbc;
+
+
+import org.postgresql.PGStatement;
+import org.postgresql.core.JavaVersion;
+import org.postgresql.core.Oid;
+import org.postgresql.core.Provider;
+import org.postgresql.util.ByteConverter;
+import org.postgresql.util.GT;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
+
+import java.lang.reflect.Field;
+import java.sql.Date;
+import java.sql.SQLException;
+import java.sql.Time;
+import java.sql.Timestamp;
+import java.time.Duration;
+import java.time.Instant;
+import java.time.LocalDate;
+import java.time.LocalDateTime;
+import java.time.LocalTime;
+import java.time.OffsetDateTime;
+import java.time.OffsetTime;
+import java.time.ZoneOffset;
+import java.time.chrono.IsoEra;
+import java.time.format.DateTimeParseException;
+import java.time.temporal.ChronoField;
+import java.util.Calendar;
+import java.util.GregorianCalendar;
+import java.util.HashMap;
+import java.util.Objects;
+import java.util.SimpleTimeZone;
+import java.util.TimeZone;
+
+/**
+ * Misc utils for handling time and date values.
+ */
+@SuppressWarnings("try")
+public class TimestampUtils {
+ /**
+ * Number of milliseconds in one day.
+ */
+ private static final int ONEDAY = 24 * 3600 * 1000;
+ private static final char[] ZEROS = {'0', '0', '0', '0', '0', '0', '0', '0', '0'};
+ private static final char[][] NUMBERS;
+  private static final HashMap<String, TimeZone> GMT_ZONES = new HashMap<>();
+ private static final int MAX_NANOS_BEFORE_WRAP_ON_ROUND = 999999500;
+ private static final Duration ONE_MICROSECOND = Duration.ofNanos(1000);
+ // LocalTime.MAX is 23:59:59.999_999_999, and it wraps to 24:00:00 when nanos exceed 999_999_499
+ // since PostgreSQL has microsecond resolution only
+ private static final LocalTime MAX_TIME = LocalTime.MAX.minus(Duration.ofNanos(500));
+ private static final OffsetDateTime MAX_OFFSET_DATETIME = OffsetDateTime.MAX.minus(Duration.ofMillis(500));
+ private static final LocalDateTime MAX_LOCAL_DATETIME = LocalDateTime.MAX.minus(Duration.ofMillis(500));
+ // low value for dates is 4713 BC
+ private static final LocalDate MIN_LOCAL_DATE = LocalDate.of(4713, 1, 1).with(ChronoField.ERA, IsoEra.BCE.getValue());
+ private static final LocalDateTime MIN_LOCAL_DATETIME = MIN_LOCAL_DATE.atStartOfDay();
+ private static final OffsetDateTime MIN_OFFSET_DATETIME = MIN_LOCAL_DATETIME.atOffset(ZoneOffset.UTC);
+ private static final Duration PG_EPOCH_DIFF =
+ Duration.between(Instant.EPOCH, LocalDate.of(2000, 1, 1).atStartOfDay().toInstant(ZoneOffset.UTC));
+
+ private static final Field DEFAULT_TIME_ZONE_FIELD;
+
+ private static final TimeZone UTC_TIMEZONE = TimeZone.getTimeZone(ZoneOffset.UTC);
+
+ private TimeZone prevDefaultZoneFieldValue;
+ private TimeZone defaultTimeZoneCache;
+
+ static {
+ // The expected maximum value is 60 (seconds), so 64 is used "just in case"
+ NUMBERS = new char[64][];
+ for (int i = 0; i < NUMBERS.length; i++) {
+ NUMBERS[i] = ((i < 10 ? "0" : "") + Integer.toString(i)).toCharArray();
+ }
+
+ // Backend's gmt-3 means GMT+03 in Java. Here a map is created so gmt-3 can be converted to
+ // java TimeZone
+ for (int i = -12; i <= 14; i++) {
+ TimeZone timeZone;
+ String pgZoneName;
+ if (i == 0) {
+ timeZone = TimeZone.getTimeZone("GMT");
+ pgZoneName = "GMT";
+ } else {
+ timeZone = TimeZone.getTimeZone("GMT" + (i <= 0 ? "+" : "-") + Math.abs(i));
+ pgZoneName = "GMT" + (i >= 0 ? "+" : "-");
+ }
+
+ if (i == 0) {
+ GMT_ZONES.put(pgZoneName, timeZone);
+ continue;
+ }
+ GMT_ZONES.put(pgZoneName + Math.abs(i), timeZone);
+ GMT_ZONES.put(pgZoneName + new String(NUMBERS[Math.abs(i)]), timeZone);
+ }
+ // Fast path to getting the default timezone.
+ // Accessing the default timezone over and over creates a clone with regular API.
+ // Because we don't mutate that object in our use of it, we can access the field directly.
+    // This saves the creation of a clone every time, and the memory associated with all these clones.
+ Field tzField;
+ try {
+ tzField = null;
+ // Avoid reflective access in Java 9+
+ if (JavaVersion.getRuntimeVersion().compareTo(JavaVersion.v1_8) <= 0) {
+ tzField = TimeZone.class.getDeclaredField("defaultTimeZone");
+ tzField.setAccessible(true);
+ TimeZone defaultTz = TimeZone.getDefault();
+ Object tzFromField = tzField.get(null);
+ if (defaultTz == null || !defaultTz.equals(tzFromField)) {
+ tzField = null;
+ }
+ }
+ } catch (Exception e) {
+ tzField = null;
+ }
+ DEFAULT_TIME_ZONE_FIELD = tzField;
+ }
+
+ private final StringBuilder sbuf = new StringBuilder();
+
+  // This calendar is used when the user provides a calendar in a setX(..., Calendar) method.
+  // It ensures the calendar is Gregorian.
+ private final Calendar calendarWithUserTz = new GregorianCalendar();
+
+ private Calendar calCache;
+ private ZoneOffset calCacheZone;
+
+ /**
+ * True if the backend uses doubles for time values. False if long is used.
+ */
+ private final boolean usesDouble;
+  private final Provider<TimeZone> timeZoneProvider;
+ private final ResourceLock lock = new ResourceLock();
+
+  public TimestampUtils(boolean usesDouble, Provider<TimeZone> timeZoneProvider) {
+ this.usesDouble = usesDouble;
+ this.timeZoneProvider = timeZoneProvider;
+ }
+
+ private Calendar getCalendar(ZoneOffset offset) {
+ if (calCache != null && Objects.equals(offset, calCacheZone)) {
+ return calCache;
+ }
+
+ // normally we would use:
+ // calCache = new GregorianCalendar(TimeZone.getTimeZone(offset));
+ // But this seems to cause issues for some crazy offsets as returned by server for BC dates!
+ final String tzid = offset.getTotalSeconds() == 0 ? "UTC" : "GMT".concat(offset.getId());
+ final TimeZone syntheticTZ = new SimpleTimeZone(offset.getTotalSeconds() * 1000, tzid);
+ calCache = new GregorianCalendar(syntheticTZ);
+ calCacheZone = offset;
+ return calCache;
+ }
+
+ private static class ParsedTimestamp {
+ boolean hasDate;
+ int era = GregorianCalendar.AD;
+ int year = 1970;
+ int month = 1;
+
+ boolean hasTime;
+ int day = 1;
+ int hour;
+ int minute;
+ int second;
+ int nanos;
+
+ boolean hasOffset;
+ ZoneOffset offset = ZoneOffset.UTC;
+ }
+
+ private static class ParsedBinaryTimestamp {
+ Infinity infinity;
+ long millis;
+ int nanos;
+ }
+
+ enum Infinity {
+ POSITIVE,
+ NEGATIVE
+ }
+
+ /**
+   * Parses the backend's text representation of a date/time value into its component fields.
+ */
+ private ParsedTimestamp parseBackendTimestamp(String str) throws SQLException {
+ char[] s = str.toCharArray();
+ int slen = s.length;
+
+ // This is pretty gross..
+ ParsedTimestamp result = new ParsedTimestamp();
+
+ // We try to parse these fields in order; all are optional
+ // (but some combinations don't make sense, e.g. if you have
+ // both date and time then they must be whitespace-separated).
+ // At least one of date and time must be present.
+
+ // leading whitespace
+ // yyyy-mm-dd
+ // whitespace
+ // hh:mm:ss
+ // whitespace
+ // timezone in one of the formats: +hh, -hh, +hh:mm, -hh:mm
+ // whitespace
+ // if date is present, an era specifier: AD or BC
+ // trailing whitespace
+
+ try {
+ int start = skipWhitespace(s, 0); // Skip leading whitespace
+ int end = firstNonDigit(s, start);
+ int num;
+ char sep;
+
+ // Possibly read date.
+ if (charAt(s, end) == '-') {
+ //
+ // Date
+ //
+ result.hasDate = true;
+
+ // year
+ result.year = number(s, start, end);
+ start = end + 1; // Skip '-'
+
+ // month
+ end = firstNonDigit(s, start);
+ result.month = number(s, start, end);
+
+ sep = charAt(s, end);
+ if (sep != '-') {
+ throw new NumberFormatException("Expected date to be dash-separated, got '" + sep + "'");
+ }
+
+ start = end + 1; // Skip '-'
+
+ // day of month
+ end = firstNonDigit(s, start);
+ result.day = number(s, start, end);
+
+ start = skipWhitespace(s, end); // Skip trailing whitespace
+ }
+
+ // Possibly read time.
+ if (Character.isDigit(charAt(s, start))) {
+ //
+ // Time.
+ //
+
+ result.hasTime = true;
+
+ // Hours
+
+ end = firstNonDigit(s, start);
+ result.hour = number(s, start, end);
+
+ sep = charAt(s, end);
+ if (sep != ':') {
+ throw new NumberFormatException("Expected time to be colon-separated, got '" + sep + "'");
+ }
+
+ start = end + 1; // Skip ':'
+
+ // minutes
+
+ end = firstNonDigit(s, start);
+ result.minute = number(s, start, end);
+
+ sep = charAt(s, end);
+ if (sep != ':') {
+ throw new NumberFormatException("Expected time to be colon-separated, got '" + sep + "'");
+ }
+
+ start = end + 1; // Skip ':'
+
+ // seconds
+
+ end = firstNonDigit(s, start);
+ result.second = number(s, start, end);
+ start = end;
+
+ // Fractional seconds.
+ if (charAt(s, start) == '.') {
+ end = firstNonDigit(s, start + 1); // Skip '.'
+ num = number(s, start + 1, end);
+
+ for (int numlength = end - (start + 1); numlength < 9; numlength++) {
+ num *= 10;
+ }
+
+ result.nanos = num;
+ start = end;
+ }
+
+ start = skipWhitespace(s, start); // Skip trailing whitespace
+ }
+
+ // Possibly read timezone.
+ sep = charAt(s, start);
+ if (sep == '-' || sep == '+') {
+ result.hasOffset = true;
+
+ int tzsign = sep == '-' ? -1 : 1;
+ int tzhr;
+ int tzmin;
+ int tzsec;
+
+ end = firstNonDigit(s, start + 1); // Skip +/-
+ tzhr = number(s, start + 1, end);
+ start = end;
+
+ sep = charAt(s, start);
+ if (sep == ':') {
+ end = firstNonDigit(s, start + 1); // Skip ':'
+ tzmin = number(s, start + 1, end);
+ start = end;
+ } else {
+ tzmin = 0;
+ }
+
+ tzsec = 0;
+ sep = charAt(s, start);
+ if (sep == ':') {
+ end = firstNonDigit(s, start + 1); // Skip ':'
+ tzsec = number(s, start + 1, end);
+ start = end;
+ }
+
+ result.offset = ZoneOffset.ofHoursMinutesSeconds(tzsign * tzhr, tzsign * tzmin, tzsign * tzsec);
+
+ start = skipWhitespace(s, start); // Skip trailing whitespace
+ }
+
+ if (result.hasDate && start < slen) {
+ String eraString = new String(s, start, slen - start);
+ if (eraString.startsWith("AD")) {
+ result.era = GregorianCalendar.AD;
+ start += 2;
+ } else if (eraString.startsWith("BC")) {
+ result.era = GregorianCalendar.BC;
+ start += 2;
+ }
+ }
+
+ if (start < slen) {
+ throw new NumberFormatException(
+ "Trailing junk on timestamp: '" + new String(s, start, slen - start) + "'");
+ }
+
+ if (!result.hasTime && !result.hasDate) {
+ throw new NumberFormatException("Timestamp has neither date nor time");
+ }
+
+ } catch (NumberFormatException nfe) {
+ throw new PSQLException(
+ GT.tr("Bad value for type timestamp/date/time: {0}", str),
+ PSQLState.BAD_DATETIME_FORMAT, nfe);
+ }
+
+ return result;
+ }
+
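
A sketch of the input shapes this parser accepts; the lambda assumes Provider<TimeZone> is a single-method interface, so it can be written inline:

    import java.util.TimeZone;

    public class ParseSketch {
      public static void main(String[] args) throws Exception {
        TimestampUtils utils = new TimestampUtils(false, () -> TimeZone.getTimeZone("UTC"));
        utils.toTimestamp(null, "2004-10-19");                    // date only
        utils.toTimestamp(null, "10:23:54");                      // time only
        utils.toTimestamp(null, "2004-10-19 10:23:54.123456+02"); // offset and fraction
        utils.toTimestamp(null, "4713-01-01 00:00:00 BC");        // era suffix
      }
    }
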
+ /**
+ * Parse a string and return a timestamp representing its value.
+ *
+ * @param cal calendar to be used to parse the input string
+   * @param s The ISO formatted date string to parse.
+ * @return null if s is null or a timestamp of the parsed string s.
+ * @throws SQLException if there is a problem parsing s.
+ */
+ public Timestamp toTimestamp(Calendar cal,
+ String s) throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ if (s == null) {
+ return null;
+ }
+
+ int slen = s.length();
+
+ // convert postgres's infinity values to internal infinity magic value
+ if (slen == 8 && "infinity".equals(s)) {
+ return new Timestamp(PGStatement.DATE_POSITIVE_INFINITY);
+ }
+
+ if (slen == 9 && "-infinity".equals(s)) {
+ return new Timestamp(PGStatement.DATE_NEGATIVE_INFINITY);
+ }
+
+ ParsedTimestamp ts = parseBackendTimestamp(s);
+ Calendar useCal = ts.hasOffset ? getCalendar(ts.offset) : setupCalendar(cal);
+ useCal.set(Calendar.ERA, ts.era);
+ useCal.set(Calendar.YEAR, ts.year);
+ useCal.set(Calendar.MONTH, ts.month - 1);
+ useCal.set(Calendar.DAY_OF_MONTH, ts.day);
+ useCal.set(Calendar.HOUR_OF_DAY, ts.hour);
+ useCal.set(Calendar.MINUTE, ts.minute);
+ useCal.set(Calendar.SECOND, ts.second);
+ useCal.set(Calendar.MILLISECOND, 0);
+
+ Timestamp result = new Timestamp(useCal.getTimeInMillis());
+ result.setNanos(ts.nanos);
+ return result;
+ }
+ }
+
+ /**
+ * Parse a string and return a LocalTime representing its value.
+ *
+   * @param s The ISO formatted time string to parse.
+ * @return null if s is null or a LocalTime of the parsed string s.
+ * @throws SQLException if there is a problem parsing s.
+ */
+ public LocalTime toLocalTime(String s) throws SQLException {
+ if (s == null) {
+ return null;
+ }
+
+ if ("24:00:00".equals(s)) {
+ return LocalTime.MAX;
+ }
+
+ try {
+ return LocalTime.parse(s);
+ } catch (DateTimeParseException nfe) {
+ throw new PSQLException(
+ GT.tr("Bad value for type timestamp/date/time: {0}", s),
+ PSQLState.BAD_DATETIME_FORMAT, nfe);
+ }
+
+ }
+
+ /**
+   * Returns the offset time object matching the given bytes with {@link Oid#TIMETZ}.
+ *
+ * @param bytes The binary encoded TIMETZ/TIME value.
+ * @return The parsed offset time object.
+ * @throws PSQLException If binary format could not be parsed.
+ */
+ public OffsetTime toOffsetTimeBin(byte[] bytes) throws PSQLException {
+ if (bytes.length != 12) {
+ throw new PSQLException(GT.tr("Unsupported binary encoding of {0}.", "time"),
+ PSQLState.BAD_DATETIME_FORMAT);
+ }
+
+ final long micros;
+
+ if (usesDouble) {
+ double seconds = ByteConverter.float8(bytes, 0);
+ micros = (long) (seconds * 1_000_000d);
+ } else {
+ micros = ByteConverter.int8(bytes, 0);
+ }
+
+ // postgres offset is negative, so we have to flip sign:
+ final ZoneOffset timeOffset = ZoneOffset.ofTotalSeconds(-ByteConverter.int4(bytes, 8));
+
+ return OffsetTime.of(LocalTime.ofNanoOfDay(Math.multiplyExact(micros, 1000L)), timeOffset);
+ }
+
+ /**
+ * Parse a string and return a OffsetTime representing its value.
+ *
+   * @param s The ISO formatted time string to parse.
+ * @return null if s is null or a OffsetTime of the parsed string s.
+ * @throws SQLException if there is a problem parsing s.
+ */
+ public OffsetTime toOffsetTime(String s) throws SQLException {
+ if (s == null) {
+ return null;
+ }
+
+ if (s.startsWith("24:00:00")) {
+ return OffsetTime.MAX;
+ }
+
+ final ParsedTimestamp ts = parseBackendTimestamp(s);
+ return OffsetTime.of(ts.hour, ts.minute, ts.second, ts.nanos, ts.offset);
+ }
+
+ /**
+ * Parse a string and return a LocalDateTime representing its value.
+ *
+   * @param s The ISO formatted date string to parse.
+ * @return null if s is null or a LocalDateTime of the parsed string s.
+ * @throws SQLException if there is a problem parsing s.
+ */
+ public LocalDateTime toLocalDateTime(String s) throws SQLException {
+ if (s == null) {
+ return null;
+ }
+
+ int slen = s.length();
+
+ // convert postgres's infinity values to internal infinity magic value
+ if (slen == 8 && "infinity".equals(s)) {
+ return LocalDateTime.MAX;
+ }
+
+ if (slen == 9 && "-infinity".equals(s)) {
+ return LocalDateTime.MIN;
+ }
+
+ ParsedTimestamp ts = parseBackendTimestamp(s);
+
+ // intentionally ignore time zone
+ // 2004-10-19 10:23:54+03:00 is 2004-10-19 10:23:54 locally
+ LocalDateTime result = LocalDateTime.of(ts.year, ts.month, ts.day, ts.hour, ts.minute, ts.second, ts.nanos);
+ if (ts.era == GregorianCalendar.BC) {
+ return result.with(ChronoField.ERA, IsoEra.BCE.getValue());
+ } else {
+ return result;
+ }
+ }
+
+ /**
+   * Returns an offset date time built from the given time, anchored at the UNIX epoch in UTC.
+   * Not used internally anymore; this function is retained for compatibility with previous versions.
+ *
+ * @param t the time value
+ * @return the matching offset date time
+ * @deprecated was used internally, and not used anymore
+ */
+ @Deprecated
+ public OffsetDateTime toOffsetDateTime(Time t) {
+ // hardcode utc because the backend does not provide us the timezone
+ // hardcode UNIX epoch, JDBC requires OffsetDateTime but doesn't describe what date should be used
+ return t.toLocalTime().atDate(LocalDate.of(1970, 1, 1)).atOffset(ZoneOffset.UTC);
+ }
+
+ /**
+ * Parse a string and return a OffsetDateTime representing its value.
+ *
+ * @param s The ISO formatted date string to parse.
+ * @return null if s is null or a OffsetDateTime of the parsed string s.
+ * @throws SQLException if there is a problem parsing s.
+ */
+ public OffsetDateTime toOffsetDateTime(
+ String s) throws SQLException {
+ if (s == null) {
+ return null;
+ }
+
+ int slen = s.length();
+
+ // convert postgres's infinity values to internal infinity magic value
+ if (slen == 8 && "infinity".equals(s)) {
+ return OffsetDateTime.MAX;
+ }
+
+ if (slen == 9 && "-infinity".equals(s)) {
+ return OffsetDateTime.MIN;
+ }
+
+ final ParsedTimestamp ts = parseBackendTimestamp(s);
+ OffsetDateTime result =
+ OffsetDateTime.of(ts.year, ts.month, ts.day, ts.hour, ts.minute, ts.second, ts.nanos, ts.offset);
+ if (ts.era == GregorianCalendar.BC) {
+ return result.with(ChronoField.ERA, IsoEra.BCE.getValue());
+ } else {
+ return result;
+ }
+ }
+
+ /**
+   * Returns the offset date time object matching the given bytes with {@link Oid#TIMESTAMPTZ}.
+ *
+ * @param bytes The binary encoded local date time value.
+ * @return The parsed local date time object.
+ * @throws PSQLException If binary format could not be parsed.
+ */
+ public OffsetDateTime toOffsetDateTimeBin(byte[] bytes) throws PSQLException {
+ ParsedBinaryTimestamp parsedTimestamp = this.toProlepticParsedTimestampBin(bytes);
+ if (parsedTimestamp.infinity == Infinity.POSITIVE) {
+ return OffsetDateTime.MAX;
+ } else if (parsedTimestamp.infinity == Infinity.NEGATIVE) {
+ return OffsetDateTime.MIN;
+ }
+
+ // hardcode utc because the backend does not provide us the timezone
+ // Postgres is always UTC
+ Instant instant = Instant.ofEpochSecond(parsedTimestamp.millis / 1000L, parsedTimestamp.nanos);
+ return OffsetDateTime.ofInstant(instant, ZoneOffset.UTC);
+ }
+
+ public Time toTime(
+ Calendar cal, String s) throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ // 1) Parse backend string
+ if (s == null) {
+ return null;
+ }
+ ParsedTimestamp ts = parseBackendTimestamp(s);
+ Calendar useCal = ts.hasOffset ? getCalendar(ts.offset) : setupCalendar(cal);
+ if (!ts.hasOffset) {
+ // When no time zone provided (e.g. time or timestamp)
+ // We get the year-month-day from the string, then truncate the day to 1970-01-01
+ // This is used for timestamp -> time conversion
+ // Note: this cannot be merged with "else" branch since
+ // timestamps at which the time flips to/from DST depend on the date
+ // For instance, 2000-03-26 02:00:00 is invalid timestamp in Europe/Moscow time zone
+ // and the valid one is 2000-03-26 03:00:00. That is why we parse full timestamp
+ // then set year to 1970 later
+ useCal.set(Calendar.ERA, ts.era);
+ useCal.set(Calendar.YEAR, ts.year);
+ useCal.set(Calendar.MONTH, ts.month - 1);
+ useCal.set(Calendar.DAY_OF_MONTH, ts.day);
+ } else {
+ // When time zone is given, we just pick the time part and assume date to be 1970-01-01
+ // this is used for time, timez, and timestamptz parsing
+ useCal.set(Calendar.ERA, GregorianCalendar.AD);
+ useCal.set(Calendar.YEAR, 1970);
+ useCal.set(Calendar.MONTH, Calendar.JANUARY);
+ useCal.set(Calendar.DAY_OF_MONTH, 1);
+ }
+ useCal.set(Calendar.HOUR_OF_DAY, ts.hour);
+ useCal.set(Calendar.MINUTE, ts.minute);
+ useCal.set(Calendar.SECOND, ts.second);
+ useCal.set(Calendar.MILLISECOND, 0);
+
+ long timeMillis = useCal.getTimeInMillis() + ts.nanos / 1000000;
+ if (ts.hasOffset || (ts.year == 1970 && ts.era == GregorianCalendar.AD)) {
+ // time with time zone has proper time zone, so the value can be returned as is
+ return new Time(timeMillis);
+ }
+
+ // 2) Truncate date part so in given time zone the date would be formatted as 01/01/1970
+ return convertToTime(timeMillis, useCal.getTimeZone());
+ }
+ }
+
+ public Date toDate(Calendar cal,
+ String s) throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ // 1) Parse backend string
+ Timestamp timestamp = toTimestamp(cal, s);
+
+ if (timestamp == null) {
+ return null;
+ }
+
+ // Note: infinite dates are handled in convertToDate
+ // 2) Truncate date part so in given time zone the date would be formatted as 00:00
+ return convertToDate(timestamp.getTime(), cal == null ? null : cal.getTimeZone());
+ }
+ }
+
+ private Calendar setupCalendar(Calendar cal) {
+ TimeZone timeZone = cal == null ? null : cal.getTimeZone();
+ return getSharedCalendar(timeZone);
+ }
+
+ /**
+ * Get a shared calendar, applying the supplied time zone or the default time zone if null.
+ *
+ * @param timeZone time zone to be set for the calendar
+ * @return The shared calendar.
+ */
+ public Calendar getSharedCalendar(TimeZone timeZone) {
+ if (timeZone == null) {
+ timeZone = getDefaultTz();
+ }
+ Calendar tmp = calendarWithUserTz;
+ tmp.setTimeZone(timeZone);
+ return tmp;
+ }
+
+ /**
+ * Returns true when microsecond part of the time should be increased
+ * when rounding to microseconds
+ * @param nanos nanosecond part of the time
+ * @return true when microsecond part of the time should be increased when rounding to microseconds
+ */
+ private static boolean nanosExceed499(int nanos) {
+ return nanos % 1000 > 499;
+ }
+
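
Two worked values for the rounding rule above, mirrored with plain arithmetic since the helper is private:

    public class RoundingSketch {
      public static void main(String[] args) {
        // Mirrors nanosExceed499: round up only when the sub-microsecond part exceeds 499 ns.
        System.out.println(1_499 % 1000 > 499); // false: 1_499 ns stays at 1 microsecond
        System.out.println(1_500 % 1000 > 499); // true: 1_500 ns rounds up to 2 microseconds
      }
    }
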
+ public String toString(Calendar cal, Timestamp x) {
+ return toString(cal, x, true);
+ }
+
+ public String toString(Calendar cal, Timestamp x,
+ boolean withTimeZone) {
+ try (ResourceLock ignore = lock.obtain()) {
+ if (x.getTime() == PGStatement.DATE_POSITIVE_INFINITY) {
+ return "infinity";
+ } else if (x.getTime() == PGStatement.DATE_NEGATIVE_INFINITY) {
+ return "-infinity";
+ }
+
+ cal = setupCalendar(cal);
+ long timeMillis = x.getTime();
+
+ // Round to microseconds
+ int nanos = x.getNanos();
+ if (nanos >= MAX_NANOS_BEFORE_WRAP_ON_ROUND) {
+ nanos = 0;
+ timeMillis++;
+ } else if (nanosExceed499(nanos)) {
+        // PostgreSQL does not support nanosecond resolution yet, and appendTime will just
+        // truncate the 0..999 part of the nanoseconds; we round up to the next microsecond
+        // here to make the value a little bit saner for debugging reasons
+ nanos += 1000 - nanos % 1000;
+ }
+ cal.setTimeInMillis(timeMillis);
+
+ sbuf.setLength(0);
+
+ appendDate(sbuf, cal);
+ sbuf.append(' ');
+ appendTime(sbuf, cal, nanos);
+ if (withTimeZone) {
+ appendTimeZone(sbuf, cal);
+ }
+ appendEra(sbuf, cal);
+
+ return sbuf.toString();
+ }
+ }
+
+ public String toString(Calendar cal, Date x) {
+ return toString(cal, x, true);
+ }
+
+ public String toString(Calendar cal, Date x,
+ boolean withTimeZone) {
+ try (ResourceLock ignore = lock.obtain()) {
+ if (x.getTime() == PGStatement.DATE_POSITIVE_INFINITY) {
+ return "infinity";
+ } else if (x.getTime() == PGStatement.DATE_NEGATIVE_INFINITY) {
+ return "-infinity";
+ }
+
+ cal = setupCalendar(cal);
+ cal.setTime(x);
+
+ sbuf.setLength(0);
+
+ appendDate(sbuf, cal);
+ appendEra(sbuf, cal);
+ if (withTimeZone) {
+ sbuf.append(' ');
+ appendTimeZone(sbuf, cal);
+ }
+
+ return sbuf.toString();
+ }
+ }
+
+ public String toString(Calendar cal, Time x) {
+ return toString(cal, x, true);
+ }
+
+ public String toString(Calendar cal, Time x,
+ boolean withTimeZone) {
+ try (ResourceLock ignore = lock.obtain()) {
+ cal = setupCalendar(cal);
+ cal.setTime(x);
+
+ sbuf.setLength(0);
+
+ appendTime(sbuf, cal, cal.get(Calendar.MILLISECOND) * 1000000);
+
+ // The 'time' parser for <= 7.3 doesn't like timezones.
+ if (withTimeZone) {
+ appendTimeZone(sbuf, cal);
+ }
+
+ return sbuf.toString();
+ }
+ }
+
+ private static void appendDate(StringBuilder sb, Calendar cal) {
+ int year = cal.get(Calendar.YEAR);
+ int month = cal.get(Calendar.MONTH) + 1;
+ int day = cal.get(Calendar.DAY_OF_MONTH);
+ appendDate(sb, year, month, day);
+ }
+
+ private static void appendDate(StringBuilder sb, int year, int month, int day) {
+ // always use at least four digits for the year so very
+ // early years, like 2, don't get misinterpreted
+ //
+ int prevLength = sb.length();
+ sb.append(year);
+ int leadingZerosForYear = 4 - (sb.length() - prevLength);
+ if (leadingZerosForYear > 0) {
+ sb.insert(prevLength, ZEROS, 0, leadingZerosForYear);
+ }
+
+ sb.append('-');
+ sb.append(NUMBERS[month]);
+ sb.append('-');
+ sb.append(NUMBERS[day]);
+ }
+
+ private static void appendTime(StringBuilder sb, Calendar cal, int nanos) {
+ int hours = cal.get(Calendar.HOUR_OF_DAY);
+ int minutes = cal.get(Calendar.MINUTE);
+ int seconds = cal.get(Calendar.SECOND);
+ appendTime(sb, hours, minutes, seconds, nanos);
+ }
+
+ /**
+ * Appends time part to the {@code StringBuilder} in PostgreSQL-compatible format.
+   * The function truncates {@code nanos} to microseconds. The value is expected to be rounded
+ * beforehand.
+ * @param sb destination
+ * @param hours hours
+ * @param minutes minutes
+ * @param seconds seconds
+ * @param nanos nanoseconds
+ */
+ private static void appendTime(StringBuilder sb, int hours, int minutes, int seconds, int nanos) {
+ sb.append(NUMBERS[hours]);
+
+ sb.append(':');
+ sb.append(NUMBERS[minutes]);
+
+ sb.append(':');
+ sb.append(NUMBERS[seconds]);
+
+ // Add nanoseconds.
+ // This won't work for server versions < 7.2 which only want
+ // a two digit fractional second, but we don't need to support 7.1
+ // anymore and getting the version number here is difficult.
+ //
+ if (nanos < 1000) {
+ return;
+ }
+ sb.append('.');
+ int len = sb.length();
+ sb.append(nanos / 1000); // append microseconds
+ int needZeros = 6 - (sb.length() - len);
+ if (needZeros > 0) {
+ sb.insert(len, ZEROS, 0, needZeros);
+ }
+
+ int end = sb.length() - 1;
+ while (sb.charAt(end) == '0') {
+ sb.deleteCharAt(end);
+ end--;
+ }
+ }
+
+ private void appendTimeZone(StringBuilder sb, Calendar cal) {
+ int offset = (cal.get(Calendar.ZONE_OFFSET) + cal.get(Calendar.DST_OFFSET)) / 1000;
+
+ appendTimeZone(sb, offset);
+ }
+
+ private void appendTimeZone(StringBuilder sb, int offset) {
+ int absoff = Math.abs(offset);
+ int hours = absoff / 60 / 60;
+ int mins = (absoff - hours * 60 * 60) / 60;
+ int secs = absoff - hours * 60 * 60 - mins * 60;
+
+ sb.append(offset >= 0 ? "+" : "-");
+
+ sb.append(NUMBERS[hours]);
+
+ if (mins == 0 && secs == 0) {
+ return;
+ }
+ sb.append(':');
+
+ sb.append(NUMBERS[mins]);
+
+ if (secs != 0) {
+ sb.append(':');
+ sb.append(NUMBERS[secs]);
+ }
+ }
+
+ private static void appendEra(StringBuilder sb, Calendar cal) {
+ if (cal.get(Calendar.ERA) == GregorianCalendar.BC) {
+ sb.append(" BC");
+ }
+ }
+
+ public String toString(LocalDate localDate) {
+ try (ResourceLock ignore = lock.obtain()) {
+ if (LocalDate.MAX.equals(localDate)) {
+ return "infinity";
+ } else if (localDate.isBefore(MIN_LOCAL_DATE)) {
+ return "-infinity";
+ }
+
+ sbuf.setLength(0);
+
+ appendDate(sbuf, localDate);
+ appendEra(sbuf, localDate);
+
+ return sbuf.toString();
+ }
+ }
+
+ public String toString(LocalTime localTime) {
+ try (ResourceLock ignore = lock.obtain()) {
+ sbuf.setLength(0);
+
+ if (localTime.isAfter(MAX_TIME)) {
+ return "24:00:00";
+ }
+
+ int nano = localTime.getNano();
+ if (nanosExceed499(nano)) {
+ // Technically speaking this is not a proper rounding, however
+ // it relies on the fact that appendTime just truncates 000..999 nanosecond part
+ localTime = localTime.plus(ONE_MICROSECOND);
+ }
+ appendTime(sbuf, localTime);
+
+ return sbuf.toString();
+ }
+ }
+
+ public String toString(OffsetTime offsetTime) {
+ try (ResourceLock ignore = lock.obtain()) {
+ sbuf.setLength(0);
+
+ final LocalTime localTime = offsetTime.toLocalTime();
+ if (localTime.isAfter(MAX_TIME)) {
+ sbuf.append("24:00:00");
+ appendTimeZone(sbuf, offsetTime.getOffset());
+ return sbuf.toString();
+ }
+
+ int nano = offsetTime.getNano();
+ if (nanosExceed499(nano)) {
+ // Technically speaking this is not a proper rounding, however
+ // it relies on the fact that appendTime just truncates 000..999 nanosecond part
+ offsetTime = offsetTime.plus(ONE_MICROSECOND);
+ }
+      appendTime(sbuf, offsetTime.toLocalTime());
+ appendTimeZone(sbuf, offsetTime.getOffset());
+
+ return sbuf.toString();
+ }
+ }
+
+ /**
+ * Converts {@code timetz} to string taking client time zone ({@link #timeZoneProvider})
+ * into account.
+ * @param value binary representation of {@code timetz}
+ * @return string representation of {@code timetz}
+ */
+ public String toStringOffsetTimeBin(byte[] value) throws PSQLException {
+ OffsetTime offsetTimeBin = toOffsetTimeBin(value);
+ return toString(withClientOffsetSameInstant(offsetTimeBin));
+ }
+
+ /**
+ * PostgreSQL does not store the time zone in the binary representation of timetz.
+ * However, we want to preserve the output of {@code getString()} in both binary and text formats,
+ * so we use the client time zone when serializing {@link OffsetTime} to string.
+ * @param input input offset time
+ * @return adjusted offset time (it represents the same instant as the input one)
+ */
+ public OffsetTime withClientOffsetSameInstant(OffsetTime input) {
+ if (input == OffsetTime.MAX || input == OffsetTime.MIN) {
+ return input;
+ }
+ TimeZone timeZone = timeZoneProvider.get();
+ int offsetMillis = timeZone.getRawOffset();
+ return input.withOffsetSameInstant(
+ offsetMillis == 0
+ ? ZoneOffset.UTC
+ : ZoneOffset.ofTotalSeconds(offsetMillis / 1000));
+ }
+
+ public String toString(OffsetDateTime offsetDateTime) {
+ try (ResourceLock ignore = lock.obtain()) {
+ if (offsetDateTime.isAfter(MAX_OFFSET_DATETIME)) {
+ return "infinity";
+ } else if (offsetDateTime.isBefore(MIN_OFFSET_DATETIME)) {
+ return "-infinity";
+ }
+
+ sbuf.setLength(0);
+
+ int nano = offsetDateTime.getNano();
+ if (nanosExceed499(nano)) {
+ // Technically speaking this is not a proper rounding, however
+ // it relies on the fact that appendTime just truncates 000..999 nanosecond part
+ offsetDateTime = offsetDateTime.plus(ONE_MICROSECOND);
+ }
+ LocalDateTime localDateTime = offsetDateTime.toLocalDateTime();
+ LocalDate localDate = localDateTime.toLocalDate();
+ appendDate(sbuf, localDate);
+ sbuf.append(' ');
+ appendTime(sbuf, localDateTime.toLocalTime());
+ appendTimeZone(sbuf, offsetDateTime.getOffset());
+ appendEra(sbuf, localDate);
+
+ return sbuf.toString();
+ }
+ }
+
+ /**
+ * Converts {@code timestamptz} to string taking client time zone ({@link #timeZoneProvider})
+ * into account.
+ * @param value binary representation of {@code timestamptz}
+ * @return string representation of {@code timestamptz}
+ */
+ public String toStringOffsetDateTime(byte[] value) throws PSQLException {
+ OffsetDateTime offsetDateTime = toOffsetDateTimeBin(value);
+ return toString(withClientOffsetSameInstant(offsetDateTime));
+ }
+
+ /**
+ * PostgreSQL does not store the time zone in the binary representation of timestamptz.
+ * However, we want to preserve the output of {@code getString()} in both binary and text formats,
+ * so we use the client time zone when serializing {@link OffsetDateTime} to string.
+ * @param input input offset date time
+ * @return adjusted offset date time (it represents the same instant as the input one)
+ */
+ public OffsetDateTime withClientOffsetSameInstant(OffsetDateTime input) {
+ if (input == OffsetDateTime.MAX || input == OffsetDateTime.MIN) {
+ return input;
+ }
+ int offsetMillis;
+ TimeZone timeZone = timeZoneProvider.get();
+ if (isSimpleTimeZone(timeZone.getID())) {
+ offsetMillis = timeZone.getRawOffset();
+ } else {
+ offsetMillis = timeZone.getOffset(input.toEpochSecond() * 1000L);
+ }
+ return input.withOffsetSameInstant(
+ offsetMillis == 0
+ ? ZoneOffset.UTC
+ : ZoneOffset.ofTotalSeconds(offsetMillis / 1000));
+ }
+
+ /**
+ * Formats {@link LocalDateTime} to be sent to the backend, thus it adds time zone.
+ * Do not use this method in {@link java.sql.ResultSet#getString(int)}
+ * @param localDateTime The local date to format as a String
+ * @return The formatted local date
+ */
+ public String toString(LocalDateTime localDateTime) {
+ try (ResourceLock ignore = lock.obtain()) {
+ if (localDateTime.isAfter(MAX_LOCAL_DATETIME)) {
+ return "infinity";
+ } else if (localDateTime.isBefore(MIN_LOCAL_DATETIME)) {
+ return "-infinity";
+ }
+
+ sbuf.setLength(0);
+
+ if (nanosExceed499(localDateTime.getNano())) {
+ localDateTime = localDateTime.plus(ONE_MICROSECOND);
+ }
+
+ LocalDate localDate = localDateTime.toLocalDate();
+ appendDate(sbuf, localDate);
+ sbuf.append(' ');
+ appendTime(sbuf, localDateTime.toLocalTime());
+ appendEra(sbuf, localDate);
+
+ return sbuf.toString();
+ }
+ }
+
+ private static void appendDate(StringBuilder sb, LocalDate localDate) {
+ int year = localDate.get(ChronoField.YEAR_OF_ERA);
+ int month = localDate.getMonthValue();
+ int day = localDate.getDayOfMonth();
+ appendDate(sb, year, month, day);
+ }
+
+ private static void appendTime(StringBuilder sb, LocalTime localTime) {
+ int hours = localTime.getHour();
+ int minutes = localTime.getMinute();
+ int seconds = localTime.getSecond();
+ int nanos = localTime.getNano();
+ appendTime(sb, hours, minutes, seconds, nanos);
+ }
+
+ private void appendTimeZone(StringBuilder sb, ZoneOffset offset) {
+ int offsetSeconds = offset.getTotalSeconds();
+
+ appendTimeZone(sb, offsetSeconds);
+ }
+
+ private static void appendEra(StringBuilder sb, LocalDate localDate) {
+ if (localDate.get(ChronoField.ERA) == IsoEra.BCE.getValue()) {
+ sb.append(" BC");
+ }
+ }
+
+ @SuppressWarnings("deprecation")
+ private static int skipWhitespace(char[] s, int start) {
+ int slen = s.length;
+ for (int i = start; i < slen; i++) {
+ if (!Character.isSpace(s[i])) {
+ return i;
+ }
+ }
+ return slen;
+ }
+
+ private static int firstNonDigit(char[] s, int start) {
+ int slen = s.length;
+ for (int i = start; i < slen; i++) {
+ if (!Character.isDigit(s[i])) {
+ return i;
+ }
+ }
+ return slen;
+ }
+
+ private static int number(char[] s, int start, int end) {
+ if (start >= end) {
+ throw new NumberFormatException();
+ }
+ int n = 0;
+ for (int i = start; i < end; i++) {
+ n = 10 * n + (s[i] - '0');
+ }
+ return n;
+ }
+
+ private static char charAt(char[] s, int pos) {
+ if (pos >= 0 && pos < s.length) {
+ return s[pos];
+ }
+ return '\0';
+ }
+
+ /**
+ * Returns the SQL Date object matching the given bytes with {@link Oid#DATE}.
+ *
+ * @param tz The timezone used.
+ * @param bytes The binary encoded date value.
+ * @return The parsed date object.
+ * @throws PSQLException If binary format could not be parsed.
+ */
+ public Date toDateBin(TimeZone tz, byte[] bytes) throws PSQLException {
+ if (bytes.length != 4) {
+ throw new PSQLException(GT.tr("Unsupported binary encoding of {0}.", "date"),
+ PSQLState.BAD_DATETIME_FORMAT);
+ }
+ int days = ByteConverter.int4(bytes, 0);
+ if (tz == null) {
+ tz = getDefaultTz();
+ }
+ long secs = toJavaSecs(days * 86400L);
+ long millis = secs * 1000L;
+
+ if (millis <= PGStatement.DATE_NEGATIVE_SMALLER_INFINITY) {
+ millis = PGStatement.DATE_NEGATIVE_INFINITY;
+ } else if (millis >= PGStatement.DATE_POSITIVE_SMALLER_INFINITY) {
+ millis = PGStatement.DATE_POSITIVE_INFINITY;
+ } else {
+ // Here be dragons: backend did not provide us the timezone, so we guess the actual point in
+ // time
+
+ millis = guessTimestamp(millis, tz);
+ }
+ return new Date(millis);
+ }
+
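
For reference, a sketch of the same DATE wire decoding using java.time, assuming (as toJavaSecs implies) an int4 count of days since the PostgreSQL epoch 2000-01-01; the infinity sentinels are ignored here:

    import java.nio.ByteBuffer;
    import java.time.LocalDate;

    public class DateBinSketch {
      static LocalDate decode(byte[] bytes) {
        int days = ByteBuffer.wrap(bytes).getInt(); // 4-byte big-endian day count
        return LocalDate.of(2000, 1, 1).plusDays(days);
      }
    }
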
+ private TimeZone getDefaultTz() {
+ // Fast path to getting the default timezone.
+ if (DEFAULT_TIME_ZONE_FIELD != null) {
+ try {
+ TimeZone defaultTimeZone = (TimeZone) DEFAULT_TIME_ZONE_FIELD.get(null);
+ if (defaultTimeZone == prevDefaultZoneFieldValue) {
+ return defaultTimeZoneCache;
+ }
+ prevDefaultZoneFieldValue = defaultTimeZone;
+ } catch (Exception e) {
+ // If this were to fail, fallback on slow method.
+ }
+ }
+ TimeZone tz = TimeZone.getDefault();
+ defaultTimeZoneCache = tz;
+ return tz;
+ }
+
+ public boolean hasFastDefaultTimeZone() {
+ return DEFAULT_TIME_ZONE_FIELD != null;
+ }
+
+ /**
+ * Returns the SQL Time object matching the given bytes with {@link Oid#TIME} or
+ * {@link Oid#TIMETZ}.
+ *
+ * @param tz The timezone used when received data is {@link Oid#TIME}, ignored if data already
+ * contains {@link Oid#TIMETZ}.
+ * @param bytes The binary encoded time value.
+ * @return The parsed time object.
+ * @throws PSQLException If binary format could not be parsed.
+ */
+ public Time toTimeBin(TimeZone tz, byte[] bytes) throws PSQLException {
+ if (bytes.length != 8 && bytes.length != 12) {
+ throw new PSQLException(GT.tr("Unsupported binary encoding of {0}.", "time"),
+ PSQLState.BAD_DATETIME_FORMAT);
+ }
+
+ long millis;
+ int timeOffset;
+
+ if (usesDouble) {
+ double time = ByteConverter.float8(bytes, 0);
+
+ millis = (long) (time * 1000);
+ } else {
+ long time = ByteConverter.int8(bytes, 0);
+
+ millis = time / 1000;
+ }
+
+ if (bytes.length == 12) {
+ timeOffset = ByteConverter.int4(bytes, 8);
+ timeOffset *= -1000;
+ millis -= timeOffset;
+ return new Time(millis);
+ }
+
+ if (tz == null) {
+ tz = getDefaultTz();
+ }
+
+ // Here be dragons: backend did not provide us the timezone, so we guess the actual point in
+ // time
+ millis = guessTimestamp(millis, tz);
+
+ return convertToTime(millis, tz); // Ensure date part is 1970-01-01
+ }
+
+ /**
+ * Returns the SQL Time object matching the given bytes with {@link Oid#TIME}.
+ *
+ * @param bytes The binary encoded time value.
+ * @return The parsed time object.
+ * @throws PSQLException If binary format could not be parsed.
+ */
+ public LocalTime toLocalTimeBin(byte[] bytes) throws PSQLException {
+ if (bytes.length != 8) {
+ throw new PSQLException(GT.tr("Unsupported binary encoding of {0}.", "time"),
+ PSQLState.BAD_DATETIME_FORMAT);
+ }
+
+ long micros;
+
+ if (usesDouble) {
+ double seconds = ByteConverter.float8(bytes, 0);
+
+ micros = (long) (seconds * 1000000d);
+ } else {
+ micros = ByteConverter.int8(bytes, 0);
+ }
+
+ return LocalTime.ofNanoOfDay(Math.multiplyExact(micros, 1000L));
+ }
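+  // Example (illustrative): an int8 payload of 3723000000 microseconds decodes to 01:02:03,
+  // since 3723 seconds == 1 hour, 2 minutes, 3 seconds of the day.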
+
+ /**
+ * Returns the SQL Timestamp object matching the given bytes with {@link Oid#TIMESTAMP} or
+ * {@link Oid#TIMESTAMPTZ}.
+ *
+ * @param tz The timezone used when received data is {@link Oid#TIMESTAMP}, ignored if data
+ * already contains {@link Oid#TIMESTAMPTZ}.
+ * @param bytes The binary encoded timestamp value.
+ * @param timestamptz True if the binary is in GMT.
+ * @return The parsed timestamp object.
+ * @throws PSQLException If binary format could not be parsed.
+ */
+ public Timestamp toTimestampBin(TimeZone tz, byte[] bytes, boolean timestamptz)
+ throws PSQLException {
+
+ ParsedBinaryTimestamp parsedTimestamp = this.toParsedTimestampBin(tz, bytes, timestamptz);
+ if (parsedTimestamp.infinity == Infinity.POSITIVE) {
+ return new Timestamp(PGStatement.DATE_POSITIVE_INFINITY);
+ } else if (parsedTimestamp.infinity == Infinity.NEGATIVE) {
+ return new Timestamp(PGStatement.DATE_NEGATIVE_INFINITY);
+ }
+
+ Timestamp ts = new Timestamp(parsedTimestamp.millis);
+ ts.setNanos(parsedTimestamp.nanos);
+ return ts;
+ }
+
+ private ParsedBinaryTimestamp toParsedTimestampBinPlain(byte[] bytes)
+ throws PSQLException {
+
+ if (bytes.length != 8) {
+ throw new PSQLException(GT.tr("Unsupported binary encoding of {0}.", "timestamp"),
+ PSQLState.BAD_DATETIME_FORMAT);
+ }
+
+ long secs;
+ int nanos;
+
+ if (usesDouble) {
+ double time = ByteConverter.float8(bytes, 0);
+ if (time == Double.POSITIVE_INFINITY) {
+ ParsedBinaryTimestamp ts = new ParsedBinaryTimestamp();
+ ts.infinity = Infinity.POSITIVE;
+ return ts;
+ } else if (time == Double.NEGATIVE_INFINITY) {
+ ParsedBinaryTimestamp ts = new ParsedBinaryTimestamp();
+ ts.infinity = Infinity.NEGATIVE;
+ return ts;
+ }
+
+ secs = (long) time;
+ nanos = (int) ((time - secs) * 1000000);
+ } else {
+ long time = ByteConverter.int8(bytes, 0);
+
+      // compatibility with text-based receiving: not strictly necessary,
+      // and can actually be confusing because there are timestamp values
+      // that are larger than "infinity"
+ if (time == Long.MAX_VALUE) {
+ ParsedBinaryTimestamp ts = new ParsedBinaryTimestamp();
+ ts.infinity = Infinity.POSITIVE;
+ return ts;
+ } else if (time == Long.MIN_VALUE) {
+ ParsedBinaryTimestamp ts = new ParsedBinaryTimestamp();
+ ts.infinity = Infinity.NEGATIVE;
+ return ts;
+ }
+
+ secs = time / 1000000;
+ nanos = (int) (time - secs * 1000000);
+ }
+ if (nanos < 0) {
+ secs--;
+ nanos += 1000000;
+ }
+ nanos *= 1000;
+
+ long millis = secs * 1000L;
+
+ ParsedBinaryTimestamp ts = new ParsedBinaryTimestamp();
+ ts.millis = millis;
+ ts.nanos = nanos;
+ return ts;
+ }
+
+ private ParsedBinaryTimestamp toParsedTimestampBin(TimeZone tz, byte[] bytes,
+ boolean timestamptz)
+ throws PSQLException {
+
+ ParsedBinaryTimestamp ts = toParsedTimestampBinPlain(bytes);
+ if (ts.infinity != null) {
+ return ts;
+ }
+
+ long secs = ts.millis / 1000L;
+
+ secs = toJavaSecs(secs);
+ long millis = secs * 1000L;
+ if (!timestamptz) {
+ // Here be dragons: backend did not provide us the timezone, so we guess the actual point in
+ // time
+ millis = guessTimestamp(millis, tz);
+ }
+
+ ts.millis = millis;
+ return ts;
+ }
+
+ private ParsedBinaryTimestamp toProlepticParsedTimestampBin(byte[] bytes)
+ throws PSQLException {
+
+ ParsedBinaryTimestamp ts = toParsedTimestampBinPlain(bytes);
+ if (ts.infinity != null) {
+ return ts;
+ }
+
+ long secs = ts.millis / 1000L;
+
+    // postgres epoch to java epoch
+ secs += PG_EPOCH_DIFF.getSeconds();
+ long millis = secs * 1000L;
+
+ ts.millis = millis;
+ return ts;
+ }
+
+ /**
+ * Returns the local date time object matching the given bytes with {@link Oid#TIMESTAMP} or
+ * {@link Oid#TIMESTAMPTZ}.
+ * @param bytes The binary encoded local date time value.
+ *
+ * @return The parsed local date time object.
+ * @throws PSQLException If binary format could not be parsed.
+ */
+ public LocalDateTime toLocalDateTimeBin(byte[] bytes) throws PSQLException {
+
+ ParsedBinaryTimestamp parsedTimestamp = this.toProlepticParsedTimestampBin(bytes);
+ if (parsedTimestamp.infinity == Infinity.POSITIVE) {
+ return LocalDateTime.MAX;
+ } else if (parsedTimestamp.infinity == Infinity.NEGATIVE) {
+ return LocalDateTime.MIN;
+ }
+
+ // hardcode utc because the backend does not provide us the timezone
+ // Postgres is always UTC
+ return LocalDateTime.ofEpochSecond(parsedTimestamp.millis / 1000L, parsedTimestamp.nanos, ZoneOffset.UTC);
+ }
+
+ /**
+ * Returns the local date time object matching the given bytes with {@link Oid#DATE} or
+ * {@link Oid#TIMESTAMP}.
+ * @param bytes The binary encoded local date value.
+ *
+ * @return The parsed local date object.
+ * @throws PSQLException If binary format could not be parsed.
+ */
+ public LocalDate toLocalDateBin(byte[] bytes) throws PSQLException {
+ if (bytes.length != 4) {
+ throw new PSQLException(GT.tr("Unsupported binary encoding of {0}.", "date"),
+ PSQLState.BAD_DATETIME_FORMAT);
+ }
+ int days = ByteConverter.int4(bytes, 0);
+ if (days == Integer.MAX_VALUE) {
+ return LocalDate.MAX;
+ } else if (days == Integer.MIN_VALUE) {
+ return LocalDate.MIN;
+ }
+ // adapt from different Postgres Epoch and convert to LocalDate:
+ return LocalDate.ofEpochDay(PG_EPOCH_DIFF.toDays() + days);
+ }
+
+ /**
+   * Given a UTC timestamp {@code millis}, finds another point in time that is rendered in the
+   * given time zone {@code tz} exactly as "millis in UTC".
+   *
+   * For instance, given 7 Jan 16:00 UTC and tz=GMT+02:00 it returns 7 Jan 14:00 UTC == 7 Jan
+   * 16:00 GMT+02:00. Note that this is not trivial for timestamps near a DST change. For such
+   * cases, we rely on {@link Calendar} to figure out the proper timestamp.
+ *
+ * @param millis source timestamp
+ * @param tz desired time zone
+ * @return timestamp that would be rendered in {@code tz} like {@code millis} in UTC
+ */
+ private long guessTimestamp(long millis, TimeZone tz) {
+ if (tz == null) {
+ // If client did not provide us with time zone, we use system default time zone
+ tz = getDefaultTz();
+ }
+ // The story here:
+ // Backend provided us with something like '2015-10-04 13:40' and it did NOT provide us with a
+ // time zone.
+ // On top of that, user asked us to treat the timestamp as if it were in GMT+02:00.
+ //
+ // The code below creates such a timestamp that is rendered as '2015-10-04 13:40 GMT+02:00'
+ // In other words, its UTC value should be 11:40 UTC == 13:40 GMT+02:00.
+ // It is not sufficient to just subtract offset as you might cross DST change as you subtract.
+ //
+ // For instance, on 2000-03-26 02:00:00 Moscow went to DST, thus local time became 03:00:00
+ // Suppose we deal with 2000-03-26 02:00:01
+    // If you subtract the offset from the timestamp, the time will be "an hour behind" since
+ // "just a couple of hours ago the OFFSET was different"
+ //
+ // To make a long story short: we have UTC timestamp that looks like "2000-03-26 02:00:01" when
+ // rendered in UTC tz.
+ // We want to know another timestamp that will look like "2000-03-26 02:00:01" in Europe/Moscow
+ // time zone.
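+    //
+    // Worked example (illustrative; exact offsets depend on the JDK's tz data):
+    //   the input renders as "2000-03-26 03:00:01" in UTC; we want the instant Moscow renders
+    //   the same way, which is 2000-03-25 23:00:01 UTC (Moscow is UTC+4 after the DST change).
+    //   Subtracting the raw offset (UTC+3) would give 2000-03-26 00:00:01 UTC instead,
+    //   which Moscow renders as 04:00:01, an hour off.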
+
+ if (isSimpleTimeZone(tz.getID())) {
+ // For well-known non-DST time zones, just subtract offset
+ return millis - tz.getRawOffset();
+ }
+ // For all the other time zones, enjoy debugging Calendar API
+ // Here we do a straight-forward implementation that splits original timestamp into pieces and
+ // composes it back.
+ // Note: cal.setTimeZone alone is not sufficient as it would alter hour (it will try to keep the
+ // same time instant value)
+ Calendar cal = calendarWithUserTz;
+ cal.setTimeZone(UTC_TIMEZONE);
+ cal.setTimeInMillis(millis);
+ int era = cal.get(Calendar.ERA);
+ int year = cal.get(Calendar.YEAR);
+ int month = cal.get(Calendar.MONTH);
+ int day = cal.get(Calendar.DAY_OF_MONTH);
+ int hour = cal.get(Calendar.HOUR_OF_DAY);
+ int min = cal.get(Calendar.MINUTE);
+ int sec = cal.get(Calendar.SECOND);
+ int ms = cal.get(Calendar.MILLISECOND);
+ cal.setTimeZone(tz);
+ cal.set(Calendar.ERA, era);
+ cal.set(Calendar.YEAR, year);
+ cal.set(Calendar.MONTH, month);
+ cal.set(Calendar.DAY_OF_MONTH, day);
+ cal.set(Calendar.HOUR_OF_DAY, hour);
+ cal.set(Calendar.MINUTE, min);
+ cal.set(Calendar.SECOND, sec);
+ cal.set(Calendar.MILLISECOND, ms);
+ return cal.getTimeInMillis();
+ }
+
+ private static boolean isSimpleTimeZone(String id) {
+ return id.startsWith("GMT") || id.startsWith("UTC");
+ }
+
+ /**
+ * Extracts the date part from a timestamp.
+ *
+ * @param millis The timestamp from which to extract the date.
+ * @param tz The time zone of the date.
+ * @return The extracted date.
+ */
+ public Date convertToDate(long millis, TimeZone tz) {
+
+ // no adjustments for the infinity hack values
+ if (millis <= PGStatement.DATE_NEGATIVE_INFINITY
+ || millis >= PGStatement.DATE_POSITIVE_INFINITY) {
+ return new Date(millis);
+ }
+ if (tz == null) {
+ tz = getDefaultTz();
+ }
+ if (isSimpleTimeZone(tz.getID())) {
+ // Truncate to 00:00 of the day.
+ // Suppose the input date is 7 Jan 15:40 GMT+02:00 (that is 13:40 UTC)
+ // We want it to become 7 Jan 00:00 GMT+02:00
+ // 1) Make sure millis becomes 15:40 in UTC, so add offset
+ int offset = tz.getRawOffset();
+ millis += offset;
+ // 2) Truncate hours, minutes, etc. Day is always 86400 seconds, no matter what leap seconds
+ // are
+ millis = floorDiv(millis, ONEDAY) * ONEDAY;
+      // 3) Now millis is 7 Jan 00:00 UTC, however we need that in GMT+02:00, so subtract some
+ // offset
+ millis -= offset;
+ // Now we have brand-new 7 Jan 00:00 GMT+02:00
+ return new Date(millis);
+ }
+
+ Calendar cal = calendarWithUserTz;
+ cal.setTimeZone(tz);
+ cal.setTimeInMillis(millis);
+ cal.set(Calendar.HOUR_OF_DAY, 0);
+ cal.set(Calendar.MINUTE, 0);
+ cal.set(Calendar.SECOND, 0);
+ cal.set(Calendar.MILLISECOND, 0);
+
+ return new Date(cal.getTimeInMillis());
+ }
+
+ /**
+ * Extracts the time part from a timestamp. This method ensures the date part of output timestamp
+ * looks like 1970-01-01 in given timezone.
+ *
+ * @param millis The timestamp from which to extract the time.
+ * @param tz timezone to use.
+ * @return The extracted time.
+ */
+ public Time convertToTime(long millis, TimeZone tz) {
+ if (tz == null) {
+ tz = getDefaultTz();
+ }
+ if (isSimpleTimeZone(tz.getID())) {
+ // Leave just time part of the day.
+ // Suppose the input date is 2015 7 Jan 15:40 GMT+02:00 (that is 13:40 UTC)
+ // We want it to become 1970 1 Jan 15:40 GMT+02:00
+ // 1) Make sure millis becomes 15:40 in UTC, so add offset
+ int offset = tz.getRawOffset();
+ millis += offset;
+ // 2) Truncate year, month, day. Day is always 86400 seconds, no matter what leap seconds are
+ millis = floorMod(millis, ONEDAY);
+      // 3) Now millis is 1970 1 Jan 15:40 UTC, however we need that in GMT+02:00, so subtract some
+ // offset
+ millis -= offset;
+ // Now we have brand-new 1970 1 Jan 15:40 GMT+02:00
+ return new Time(millis);
+ }
+ Calendar cal = calendarWithUserTz;
+ cal.setTimeZone(tz);
+ cal.setTimeInMillis(millis);
+ cal.set(Calendar.ERA, GregorianCalendar.AD);
+ cal.set(Calendar.YEAR, 1970);
+ cal.set(Calendar.MONTH, 0);
+ cal.set(Calendar.DAY_OF_MONTH, 1);
+
+ return new Time(cal.getTimeInMillis());
+ }
+
+ /**
+ * Returns the given time value as String matching what the current postgresql server would send
+ * in text mode.
+ *
+ * @param time time value
+ * @param withTimeZone whether timezone should be added
+ * @return given time value as String
+ */
+ public String timeToString(java.util.Date time, boolean withTimeZone) {
+ Calendar cal = null;
+ if (withTimeZone) {
+ cal = calendarWithUserTz;
+ cal.setTimeZone(timeZoneProvider.get());
+ }
+ if (time instanceof Timestamp) {
+ return toString(cal, (Timestamp) time, withTimeZone);
+ }
+ if (time instanceof Time) {
+ return toString(cal, (Time) time, withTimeZone);
+ }
+ return toString(cal, (Date) time, withTimeZone);
+ }
+
+ /**
+   * Converts the given postgresql seconds to java seconds. Reverse engineered by inserting varying
+   * dates into postgresql and tuning the formula until the java dates matched. See {@link #toPgSecs}
+ * for the reverse operation.
+ *
+ * @param secs Postgresql seconds.
+ * @return Java seconds.
+ */
+ private static long toJavaSecs(long secs) {
+    // postgres epoch to java epoch
+ secs += PG_EPOCH_DIFF.getSeconds();
+
+ // Julian/Gregorian calendar cutoff point
+ if (secs < -12219292800L) { // October 4, 1582 -> October 15, 1582
+ secs += 86400 * 10;
+ if (secs < -14825808000L) { // 1500-02-28 -> 1500-03-01
+ int extraLeaps = (int) ((secs + 14825808000L) / 3155760000L);
+ extraLeaps--;
+ extraLeaps -= extraLeaps / 4;
+ secs += extraLeaps * 86400L;
+ }
+ }
+ return secs;
+ }
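+  // Sanity check (illustrative): toJavaSecs(0) == 946684800, i.e. the Postgres epoch
+  // 2000-01-01 00:00:00 UTC expressed as seconds since the Java epoch 1970-01-01.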
+
+ /**
+ * Converts the given java seconds to postgresql seconds. See {@link #toJavaSecs} for the reverse
+ * operation. The conversion is valid for any year 100 BC onwards.
+ *
+   * @param secs Java seconds.
+   * @return Postgresql seconds.
+ */
+ private static long toPgSecs(long secs) {
+    // java epoch to postgres epoch
+ secs -= PG_EPOCH_DIFF.getSeconds();
+
+ // Julian/Gregorian calendar cutoff point
+ if (secs < -13165977600L) { // October 15, 1582 -> October 4, 1582
+ secs -= 86400 * 10;
+ if (secs < -15773356800L) { // 1500-03-01 -> 1500-02-28
+ int years = (int) ((secs + 15773356800L) / -3155823050L);
+ years++;
+ years -= years / 4;
+ secs += years * 86400L;
+ }
+ }
+
+ return secs;
+ }
+
+ /**
+ * Converts the SQL Date to binary representation for {@link Oid#DATE}.
+ *
+ * @param tz The timezone used.
+   * @param bytes The output buffer that receives the binary encoded date value.
+   * @param value The date value to encode.
+ * @throws PSQLException If binary format could not be parsed.
+ */
+ public void toBinDate(TimeZone tz, byte[] bytes, Date value) throws PSQLException {
+ long millis = value.getTime();
+
+ if (tz == null) {
+ tz = getDefaultTz();
+ }
+ // It "getOffset" is UNTESTED
+ // See org.postgresql.jdbc.AbstractJdbc2Statement.setDate(int, java.sql.Date,
+ // java.util.Calendar)
+ // The problem is we typically do not know for sure what is the exact required date/timestamp
+ // type
+ // Thus pgjdbc sticks to text transfer.
+ millis += tz.getOffset(millis);
+
+ long secs = toPgSecs(millis / 1000);
+ ByteConverter.int4(bytes, 0, (int) (secs / 86400));
+ }
+
+ /**
+ * Converts backend's TimeZone parameter to java format.
+ * Notable difference: backend's gmt-3 is GMT+03 in Java.
+ *
+ * @param timeZone time zone to use
+ * @return java TimeZone
+ */
+ public static TimeZone parseBackendTimeZone(String timeZone) {
+ if (timeZone.startsWith("GMT")) {
+ TimeZone tz = GMT_ZONES.get(timeZone);
+ if (tz != null) {
+ return tz;
+ }
+ }
+ return TimeZone.getTimeZone(timeZone);
+ }
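+  // Example (illustrative, assuming GMT_ZONES carries POSIX-style keys): a backend zone of
+  // "GMT-3" (POSIX notation, meaning UTC+3) resolves through GMT_ZONES to Java's GMT+03,
+  // while names missing from the map fall through to TimeZone.getTimeZone(), which silently
+  // returns GMT for IDs it cannot understand.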
+
+ private static long floorDiv(long x, long y) {
+ long r = x / y;
+ // if the signs are different and modulo not zero, round down
+ if ((x ^ y) < 0 && (r * y != x)) {
+ r--;
+ }
+ return r;
+ }
+
+ private static long floorMod(long x, long y) {
+ return x - floorDiv(x, y) * y;
+ }
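+  // These differ from plain / and % for negative arguments, e.g. (illustrative):
+  //   floorDiv(-1, ONEDAY) == -1 and floorMod(-1, ONEDAY) == ONEDAY - 1,
+  //   whereas -1 / ONEDAY == 0 and -1 % ONEDAY == -1.
+  // That is what lets convertToDate/convertToTime truncate correctly for pre-1970 instants.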
+
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/TypeInfoCache.java b/pgjdbc/src/main/java/org/postgresql/jdbc/TypeInfoCache.java
new file mode 100644
index 0000000..615b653
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/TypeInfoCache.java
@@ -0,0 +1,1095 @@
+/*
+ * Copyright (c) 2005, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.jdbc;
+
+import org.postgresql.core.BaseConnection;
+import org.postgresql.core.BaseStatement;
+import org.postgresql.core.Oid;
+import org.postgresql.core.QueryExecutor;
+import org.postgresql.core.ServerVersion;
+import org.postgresql.core.TypeInfo;
+import org.postgresql.util.GT;
+import org.postgresql.util.PGobject;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
+
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Types;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Locale;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+@SuppressWarnings("try")
+public class TypeInfoCache implements TypeInfo {
+
+ private static final Logger LOGGER = Logger.getLogger(TypeInfoCache.class.getName());
+
+ // pgname (String) -> java.sql.Types (Integer)
+  private final Map<String, Integer> pgNameToSQLType;
+
+  private final Map<Integer, Integer> oidToSQLType;
+
+  // pgname (String) -> java class name (String)
+  // ie "text" -> "java.lang.String"
+  private final Map<String, String> pgNameToJavaClass;
+
+  // oid (Integer) -> pgname (String)
+  private final Map<Integer, String> oidToPgName;
+  // pgname (String) -> oid (Integer)
+  private final Map<String, Integer> pgNameToOid;
+
+  private final Map<String, Integer> javaArrayTypeToOid;
+
+  // pgname (String) -> extension pgobject (Class)
+  private final Map<String, Class<? extends PGobject>> pgNameToPgObject;
+
+  // type array oid -> base type's oid
+  private final Map<Integer, Integer> pgArrayToPgType;
+
+  // array type oid -> base type array element delimiter
+  private final Map<Integer, Character> arrayOidToDelimiter;
+
+ private final BaseConnection conn;
+ private final int unknownLength;
+ private PreparedStatement getOidStatementSimple;
+ private PreparedStatement getOidStatementComplexNonArray;
+ private PreparedStatement getOidStatementComplexArray;
+ private PreparedStatement getNameStatement;
+ private PreparedStatement getArrayElementOidStatement;
+ private PreparedStatement getArrayDelimiterStatement;
+ private PreparedStatement getTypeInfoStatement;
+ private PreparedStatement getAllTypeInfoStatement;
+ private final ResourceLock lock = new ResourceLock();
+
+ // basic pg types info:
+ // 0 - type name
+ // 1 - type oid
+ // 2 - sql type
+ // 3 - java class
+ // 4 - array type oid
+ private static final Object[][] types = {
+ {"int2", Oid.INT2, Types.SMALLINT, "java.lang.Integer", Oid.INT2_ARRAY},
+ {"int4", Oid.INT4, Types.INTEGER, "java.lang.Integer", Oid.INT4_ARRAY},
+ {"oid", Oid.OID, Types.BIGINT, "java.lang.Long", Oid.OID_ARRAY},
+ {"int8", Oid.INT8, Types.BIGINT, "java.lang.Long", Oid.INT8_ARRAY},
+ {"money", Oid.MONEY, Types.DOUBLE, "java.lang.Double", Oid.MONEY_ARRAY},
+ {"numeric", Oid.NUMERIC, Types.NUMERIC, "java.math.BigDecimal", Oid.NUMERIC_ARRAY},
+ {"float4", Oid.FLOAT4, Types.REAL, "java.lang.Float", Oid.FLOAT4_ARRAY},
+ {"float8", Oid.FLOAT8, Types.DOUBLE, "java.lang.Double", Oid.FLOAT8_ARRAY},
+ {"char", Oid.CHAR, Types.CHAR, "java.lang.String", Oid.CHAR_ARRAY},
+ {"bpchar", Oid.BPCHAR, Types.CHAR, "java.lang.String", Oid.BPCHAR_ARRAY},
+ {"varchar", Oid.VARCHAR, Types.VARCHAR, "java.lang.String", Oid.VARCHAR_ARRAY},
+ {"varbit", Oid.VARBIT, Types.OTHER, "java.lang.String", Oid.VARBIT_ARRAY},
+ {"text", Oid.TEXT, Types.VARCHAR, "java.lang.String", Oid.TEXT_ARRAY},
+ {"name", Oid.NAME, Types.VARCHAR, "java.lang.String", Oid.NAME_ARRAY},
+ {"bytea", Oid.BYTEA, Types.BINARY, "[B", Oid.BYTEA_ARRAY},
+ {"bool", Oid.BOOL, Types.BIT, "java.lang.Boolean", Oid.BOOL_ARRAY},
+ {"bit", Oid.BIT, Types.BIT, "java.lang.Boolean", Oid.BIT_ARRAY},
+ {"date", Oid.DATE, Types.DATE, "java.sql.Date", Oid.DATE_ARRAY},
+ {"time", Oid.TIME, Types.TIME, "java.sql.Time", Oid.TIME_ARRAY},
+ {"timetz", Oid.TIMETZ, Types.TIME, "java.sql.Time", Oid.TIMETZ_ARRAY},
+ {"timestamp", Oid.TIMESTAMP, Types.TIMESTAMP, "java.sql.Timestamp", Oid.TIMESTAMP_ARRAY},
+ {"timestamptz", Oid.TIMESTAMPTZ, Types.TIMESTAMP, "java.sql.Timestamp",
+ Oid.TIMESTAMPTZ_ARRAY},
+ {"refcursor", Oid.REF_CURSOR, Types.REF_CURSOR, "java.sql.ResultSet", Oid.REF_CURSOR_ARRAY},
+ {"json", Oid.JSON, Types.OTHER, "org.postgresql.util.PGobject", Oid.JSON_ARRAY},
+ {"point", Oid.POINT, Types.OTHER, "org.postgresql.geometric.PGpoint", Oid.POINT_ARRAY},
+ {"box", Oid.BOX, Types.OTHER, "org.postgresql.geometric.PGBox", Oid.BOX_ARRAY}
+ };
+
+ /**
+   * PG maps several aliases to real type names. When we do queries against pg_catalog, we must use
+ * the real type, not an alias, so use this mapping.
+ *
+ * Additional values used at runtime (including case variants) will be added to the map.
+ *
+ */
+  private static final ConcurrentMap<String, String> TYPE_ALIASES = new ConcurrentHashMap<>(30);
+
+ static {
+ TYPE_ALIASES.put("bool", "bool");
+ TYPE_ALIASES.put("boolean", "bool");
+ TYPE_ALIASES.put("smallint", "int2");
+ TYPE_ALIASES.put("int2", "int2");
+ TYPE_ALIASES.put("int", "int4");
+ TYPE_ALIASES.put("integer", "int4");
+ TYPE_ALIASES.put("int4", "int4");
+ TYPE_ALIASES.put("long", "int8");
+ TYPE_ALIASES.put("int8", "int8");
+ TYPE_ALIASES.put("bigint", "int8");
+ TYPE_ALIASES.put("float", "float8");
+ TYPE_ALIASES.put("real", "float4");
+ TYPE_ALIASES.put("float4", "float4");
+ TYPE_ALIASES.put("double", "float8");
+ TYPE_ALIASES.put("double precision", "float8");
+ TYPE_ALIASES.put("float8", "float8");
+ TYPE_ALIASES.put("decimal", "numeric");
+ TYPE_ALIASES.put("numeric", "numeric");
+ TYPE_ALIASES.put("character varying", "varchar");
+ TYPE_ALIASES.put("varchar", "varchar");
+ TYPE_ALIASES.put("time without time zone", "time");
+ TYPE_ALIASES.put("time", "time");
+ TYPE_ALIASES.put("time with time zone", "timetz");
+ TYPE_ALIASES.put("timetz", "timetz");
+ TYPE_ALIASES.put("timestamp without time zone", "timestamp");
+ TYPE_ALIASES.put("timestamp", "timestamp");
+ TYPE_ALIASES.put("timestamp with time zone", "timestamptz");
+ TYPE_ALIASES.put("timestamptz", "timestamptz");
+ }
+
+ @SuppressWarnings("this-escape")
+ public TypeInfoCache(BaseConnection conn, int unknownLength) {
+ this.conn = conn;
+ this.unknownLength = unknownLength;
+ oidToPgName = new HashMap<>((int) Math.round(types.length * 1.5));
+ pgNameToOid = new HashMap<>((int) Math.round(types.length * 1.5));
+ javaArrayTypeToOid = new HashMap<>((int) Math.round(types.length * 1.5));
+ pgNameToJavaClass = new HashMap<>((int) Math.round(types.length * 1.5));
+ pgNameToPgObject = new HashMap<>((int) Math.round(types.length * 1.5));
+ pgArrayToPgType = new HashMap<>((int) Math.round(types.length * 1.5));
+ arrayOidToDelimiter = new HashMap<>((int) Math.round(types.length * 2.5));
+
+ // needs to be synchronized because the iterator is returned
+ // from getPGTypeNamesWithSQLTypes()
+    pgNameToSQLType = Collections.synchronizedMap(new HashMap<String, Integer>((int) Math.round(types.length * 1.5)));
+    oidToSQLType = Collections.synchronizedMap(new HashMap<Integer, Integer>((int) Math.round(types.length * 1.5)));
+
+ for (Object[] type : types) {
+ String pgTypeName = (String) type[0];
+ Integer oid = (Integer) type[1];
+ Integer sqlType = (Integer) type[2];
+ String javaClass = (String) type[3];
+ Integer arrayOid = (Integer) type[4];
+
+ addCoreType(pgTypeName, oid, sqlType, javaClass, arrayOid);
+ }
+
+ pgNameToJavaClass.put("hstore", Map.class.getName());
+ }
+
+ @Override
+ public void addCoreType(String pgTypeName, Integer oid, Integer sqlType,
+ String javaClass, Integer arrayOid) {
+ try (ResourceLock ignore = lock.obtain()) {
+ pgNameToJavaClass.put(pgTypeName, javaClass);
+ pgNameToOid.put(pgTypeName, oid);
+ oidToPgName.put(oid, pgTypeName);
+ javaArrayTypeToOid.put(javaClass, arrayOid);
+ pgArrayToPgType.put(arrayOid, oid);
+ pgNameToSQLType.put(pgTypeName, sqlType);
+ oidToSQLType.put(oid, sqlType);
+
+      // Currently we hardcode the array delimiter of all core types
+      // to a comma. In a stock install the only exception is
+      // the box datatype, and it's not a JDBC core type.
+ //
+ Character delim = ',';
+ if ("box".equals(pgTypeName)) {
+ delim = ';';
+ }
+ arrayOidToDelimiter.put(oid, delim);
+ arrayOidToDelimiter.put(arrayOid, delim);
+
+ String pgArrayTypeName = pgTypeName + "[]";
+ pgNameToJavaClass.put(pgArrayTypeName, "java.sql.Array");
+ pgNameToSQLType.put(pgArrayTypeName, Types.ARRAY);
+ oidToSQLType.put(arrayOid, Types.ARRAY);
+ pgNameToOid.put(pgArrayTypeName, arrayOid);
+ pgArrayTypeName = "_" + pgTypeName;
+ if (!pgNameToJavaClass.containsKey(pgArrayTypeName)) {
+ pgNameToJavaClass.put(pgArrayTypeName, "java.sql.Array");
+ pgNameToSQLType.put(pgArrayTypeName, Types.ARRAY);
+ pgNameToOid.put(pgArrayTypeName, arrayOid);
+ oidToPgName.put(arrayOid, pgArrayTypeName);
+ }
+ }
+ }
+
+ @Override
+  public void addDataType(String type, Class<? extends PGobject> klass)
+ throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ pgNameToPgObject.put(type, klass);
+ pgNameToJavaClass.put(type, klass.getName());
+ }
+ }
+
+ @Override
+  public Iterator<String> getPGTypeNamesWithSQLTypes() {
+ return pgNameToSQLType.keySet().iterator();
+ }
+
+ @Override
+  public Iterator<Integer> getPGTypeOidsWithSQLTypes() {
+ return oidToSQLType.keySet().iterator();
+ }
+
+ private String getSQLTypeQuery(boolean typoidParam) {
+    // There's no great way of telling what's an array type.
+    // People can name their own types starting with _.
+    // Other types that aren't actually arrays use typelem, like box.
+    //
+    // In case of multiple records (in different schemas), choose the one from the current
+    // schema; otherwise take the last version of a type, which is at least more deterministic
+    // than before (keeping the old behaviour of finding types that should not be found
+    // without a correct search path).
+ StringBuilder sql = new StringBuilder();
+ sql.append("SELECT typinput='pg_catalog.array_in'::regproc as is_array, typtype, typname, pg_type.oid ");
+ sql.append(" FROM pg_catalog.pg_type ");
+ sql.append(" LEFT JOIN (select ns.oid as nspoid, ns.nspname, r.r ");
+ sql.append(" from pg_namespace as ns ");
+ // -- go with older way of unnesting array to be compatible with 8.0
+ sql.append(" join ( select s.r, (current_schemas(false))[s.r] as nspname ");
+ sql.append(" from generate_series(1, array_upper(current_schemas(false), 1)) as s(r) ) as r ");
+ sql.append(" using ( nspname ) ");
+ sql.append(" ) as sp ");
+ sql.append(" ON sp.nspoid = typnamespace ");
+ if (typoidParam) {
+ sql.append(" WHERE pg_type.oid = ? ");
+ }
+ sql.append(" ORDER BY sp.r, pg_type.oid DESC;");
+ return sql.toString();
+ }
+
+ private int getSQLTypeFromQueryResult(ResultSet rs) throws SQLException {
+ Integer type = null;
+ boolean isArray = rs.getBoolean("is_array");
+ String typtype = rs.getString("typtype");
+ if (isArray) {
+ type = Types.ARRAY;
+ } else if ("c".equals(typtype)) {
+ type = Types.STRUCT;
+ } else if ("d".equals(typtype)) {
+ type = Types.DISTINCT;
+ } else if ("e".equals(typtype)) {
+ type = Types.VARCHAR;
+ }
+ if (type == null) {
+ type = Types.OTHER;
+ }
+ return type;
+ }
+
+ private PreparedStatement prepareGetAllTypeInfoStatement() throws SQLException {
+ PreparedStatement getAllTypeInfoStatement = this.getAllTypeInfoStatement;
+ if (getAllTypeInfoStatement == null) {
+ getAllTypeInfoStatement = conn.prepareStatement(getSQLTypeQuery(false));
+ this.getAllTypeInfoStatement = getAllTypeInfoStatement;
+ }
+ return getAllTypeInfoStatement;
+ }
+
+ public void cacheSQLTypes() throws SQLException {
+ LOGGER.log(Level.FINEST, "caching all SQL typecodes");
+ PreparedStatement getAllTypeInfoStatement = prepareGetAllTypeInfoStatement();
+ // Go through BaseStatement to avoid transaction start.
+ if (!((BaseStatement) getAllTypeInfoStatement)
+ .executeWithFlags(QueryExecutor.QUERY_SUPPRESS_BEGIN)) {
+ throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA);
+ }
+ ResultSet rs = getAllTypeInfoStatement.getResultSet();
+ while (rs.next()) {
+ String typeName = rs.getString("typname");
+ Integer type = getSQLTypeFromQueryResult(rs);
+ if (!pgNameToSQLType.containsKey(typeName)) {
+ pgNameToSQLType.put(typeName, type);
+ }
+
+ Integer typeOid = longOidToInt(rs.getLong("oid"));
+ if (!oidToSQLType.containsKey(typeOid)) {
+ oidToSQLType.put(typeOid, type);
+ }
+ }
+ rs.close();
+ }
+
+ private PreparedStatement prepareGetTypeInfoStatement() throws SQLException {
+ PreparedStatement getTypeInfoStatement = this.getTypeInfoStatement;
+ if (getTypeInfoStatement == null) {
+ getTypeInfoStatement = conn.prepareStatement(getSQLTypeQuery(true));
+ this.getTypeInfoStatement = getTypeInfoStatement;
+ }
+ return getTypeInfoStatement;
+ }
+
+ @Override
+ public int getSQLType(String pgTypeName) throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ /*
+ Get a few things out of the way such as arrays and known types
+ */
+ if (pgTypeName.endsWith("[]")) {
+ return Types.ARRAY;
+ }
+ Integer i = this.pgNameToSQLType.get(pgTypeName);
+ if (i != null) {
+ return i;
+ }
+
+ /*
+      If all else fails, query the database and
+      save the result for future calls
+ */
+ i = getSQLType(getPGType(pgTypeName));
+
+ pgNameToSQLType.put(pgTypeName, i);
+ return i;
+ }
+ }
+
+ @Override
+ public int getJavaArrayType(String className) throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ Integer oid = javaArrayTypeToOid.get(className);
+ if (oid == null) {
+ return Oid.UNSPECIFIED;
+ }
+ return oid;
+ }
+ }
+
+ @Override
+ public int getSQLType(int typeOid) throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ if (typeOid == Oid.UNSPECIFIED) {
+ return Types.OTHER;
+ }
+
+ Integer i = oidToSQLType.get(typeOid);
+ if (i != null) {
+ return i;
+ }
+
+ LOGGER.log(Level.FINEST, "querying SQL typecode for pg type oid ''{0}''", intOidToLong(typeOid));
+
+ PreparedStatement getTypeInfoStatement = prepareGetTypeInfoStatement();
+
+ getTypeInfoStatement.setLong(1, intOidToLong(typeOid));
+
+ // Go through BaseStatement to avoid transaction start.
+ if (!((BaseStatement) getTypeInfoStatement)
+ .executeWithFlags(QueryExecutor.QUERY_SUPPRESS_BEGIN)) {
+ throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA);
+ }
+
+ ResultSet rs = getTypeInfoStatement.getResultSet();
+
+ int sqlType = Types.OTHER;
+ if (rs.next()) {
+ sqlType = getSQLTypeFromQueryResult(rs);
+ }
+ rs.close();
+
+ oidToSQLType.put(typeOid, sqlType);
+ return sqlType;
+ }
+ }
+
+ private PreparedStatement getOidStatement(String pgTypeName) throws SQLException {
+ boolean isArray = pgTypeName.endsWith("[]");
+ boolean hasQuote = pgTypeName.contains("\"");
+ int dotIndex = pgTypeName.indexOf('.');
+
+ if (dotIndex == -1 && !hasQuote && !isArray) {
+ PreparedStatement getOidStatementSimple = this.getOidStatementSimple;
+ if (getOidStatementSimple == null) {
+ String sql;
+ // see comments in @getSQLType()
+ // -- go with older way of unnesting array to be compatible with 8.0
+ sql = "SELECT pg_type.oid, typname "
+ + " FROM pg_catalog.pg_type "
+ + " LEFT "
+ + " JOIN (select ns.oid as nspoid, ns.nspname, r.r "
+ + " from pg_namespace as ns "
+ + " join ( select s.r, (current_schemas(false))[s.r] as nspname "
+ + " from generate_series(1, array_upper(current_schemas(false), 1)) as s(r) ) as r "
+ + " using ( nspname ) "
+ + " ) as sp "
+ + " ON sp.nspoid = typnamespace "
+ + " WHERE typname = ? "
+ + " ORDER BY sp.r, pg_type.oid DESC LIMIT 1;";
+ this.getOidStatementSimple = getOidStatementSimple = conn.prepareStatement(sql);
+ }
+ // coerce to lower case to handle upper case type names
+ String lcName = pgTypeName.toLowerCase(Locale.ROOT);
+    // default arrays are represented with a _ prefix ... note this does not even fully work
+    // for the public schema
+ getOidStatementSimple.setString(1, lcName);
+ return getOidStatementSimple;
+ }
+ PreparedStatement oidStatementComplex;
+ if (isArray) {
+ PreparedStatement getOidStatementComplexArray = this.getOidStatementComplexArray;
+ if (getOidStatementComplexArray == null) {
+ String sql;
+ if (conn.haveMinimumServerVersion(ServerVersion.v8_3)) {
+ sql = "SELECT t.typarray, arr.typname "
+ + " FROM pg_catalog.pg_type t"
+ + " JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid"
+ + " JOIN pg_catalog.pg_type arr ON arr.oid = t.typarray"
+ + " WHERE t.typname = ? AND (n.nspname = ? OR ? AND n.nspname = ANY (current_schemas(true)))"
+ + " ORDER BY t.oid DESC LIMIT 1";
+ } else {
+ sql = "SELECT t.oid, t.typname "
+ + " FROM pg_catalog.pg_type t"
+ + " JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid"
+ + " WHERE t.typelem = (SELECT oid FROM pg_catalog.pg_type WHERE typname = ?)"
+ + " AND substring(t.typname, 1, 1) = '_' AND t.typlen = -1"
+ + " AND (n.nspname = ? OR ? AND n.nspname = ANY (current_schemas(true)))"
+ + " ORDER BY t.typelem DESC LIMIT 1";
+ }
+ this.getOidStatementComplexArray = getOidStatementComplexArray = conn.prepareStatement(sql);
+ }
+ oidStatementComplex = getOidStatementComplexArray;
+ } else {
+ PreparedStatement getOidStatementComplexNonArray = this.getOidStatementComplexNonArray;
+ if (getOidStatementComplexNonArray == null) {
+ String sql = "SELECT t.oid, t.typname "
+ + " FROM pg_catalog.pg_type t"
+ + " JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid"
+ + " WHERE t.typname = ? AND (n.nspname = ? OR ? AND n.nspname = ANY (current_schemas(true)))"
+ + " ORDER BY t.oid DESC LIMIT 1";
+ this.getOidStatementComplexNonArray = getOidStatementComplexNonArray = conn.prepareStatement(sql);
+ }
+ oidStatementComplex = getOidStatementComplexNonArray;
+ }
+    // The requested type name may be schema-specific, of the form "{schema}"."typeName",
+    // or, when no schema is given, the lookup spans all schemas on the search path.
+ String fullName = isArray ? pgTypeName.substring(0, pgTypeName.length() - 2) : pgTypeName;
+ String schema;
+ String name;
+ // simple use case
+ if (dotIndex == -1) {
+ schema = null;
+ name = fullName;
+ } else {
+ if (fullName.startsWith("\"")) {
+ if (fullName.endsWith("\"")) {
+ String[] parts = fullName.split("\"\\.\"");
+ schema = parts.length == 2 ? parts[0] + "\"" : null;
+ name = parts.length == 2 ? "\"" + parts[1] : parts[0];
+ } else {
+ int lastDotIndex = fullName.lastIndexOf('.');
+ name = fullName.substring(lastDotIndex + 1);
+ schema = fullName.substring(0, lastDotIndex);
+ }
+ } else {
+ schema = fullName.substring(0, dotIndex);
+ name = fullName.substring(dotIndex + 1);
+ }
+ }
+ if (schema != null && schema.startsWith("\"") && schema.endsWith("\"")) {
+ schema = schema.substring(1, schema.length() - 1);
+ } else if (schema != null) {
+ schema = schema.toLowerCase(Locale.ROOT);
+ }
+ if (name.startsWith("\"") && name.endsWith("\"")) {
+ name = name.substring(1, name.length() - 1);
+ } else {
+ name = name.toLowerCase(Locale.ROOT);
+ }
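+    // e.g. (illustrative) "\"MySchema\".\"MyType\"" keeps its case: schema MySchema, name MyType;
+    // an unquoted myschema.mytype is folded to lower case before the lookup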
+ oidStatementComplex.setString(1, name);
+ oidStatementComplex.setString(2, schema);
+ oidStatementComplex.setBoolean(3, schema == null);
+ return oidStatementComplex;
+ }
+
+ @Override
+ public int getPGType(String pgTypeName) throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ // there really isn't anything else to return other than UNSPECIFIED here.
+ if (pgTypeName == null) {
+ return Oid.UNSPECIFIED;
+ }
+
+ Integer oid = pgNameToOid.get(pgTypeName);
+ if (oid != null) {
+ return oid;
+ }
+
+ PreparedStatement oidStatement = getOidStatement(pgTypeName);
+
+ // Go through BaseStatement to avoid transaction start.
+ if (!((BaseStatement) oidStatement).executeWithFlags(QueryExecutor.QUERY_SUPPRESS_BEGIN)) {
+ throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA);
+ }
+
+ oid = Oid.UNSPECIFIED;
+ ResultSet rs = oidStatement.getResultSet();
+ if (rs.next()) {
+ oid = (int) rs.getLong(1);
+ String internalName = rs.getString(2);
+ oidToPgName.put(oid, internalName);
+ pgNameToOid.put(internalName, oid);
+ }
+ pgNameToOid.put(pgTypeName, oid);
+ rs.close();
+
+ return oid;
+ }
+ }
+
+ @Override
+ public String getPGType(int oid) throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ if (oid == Oid.UNSPECIFIED) {
+ // TODO: it would be great to forbid UNSPECIFIED argument, and make the return type non-nullable
+ return null;
+ }
+
+ String pgTypeName = oidToPgName.get(oid);
+ if (pgTypeName != null) {
+ return pgTypeName;
+ }
+
+ PreparedStatement getNameStatement = prepareGetNameStatement();
+
+ getNameStatement.setInt(1, oid);
+
+ // Go through BaseStatement to avoid transaction start.
+ if (!((BaseStatement) getNameStatement).executeWithFlags(QueryExecutor.QUERY_SUPPRESS_BEGIN)) {
+ throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA);
+ }
+
+ ResultSet rs = getNameStatement.getResultSet();
+ if (rs.next()) {
+ boolean onPath = rs.getBoolean(1);
+ String schema = rs.getString(2);
+ String name = rs.getString(3);
+ if (onPath) {
+ pgTypeName = name;
+ pgNameToOid.put(schema + "." + name, oid);
+ } else {
+ // TODO: escaping !?
+ pgTypeName = "\"" + schema + "\".\"" + name + "\"";
+ // if all is lowercase add special type info
+ // TODO: should probably check for all special chars
+ if (schema.equals(schema.toLowerCase(Locale.ROOT)) && schema.indexOf('.') == -1
+ && name.equals(name.toLowerCase(Locale.ROOT)) && name.indexOf('.') == -1) {
+ pgNameToOid.put(schema + "." + name, oid);
+ }
+ }
+ pgNameToOid.put(pgTypeName, oid);
+ oidToPgName.put(oid, pgTypeName);
+ }
+ rs.close();
+
+ return pgTypeName;
+ }
+ }
+
+ private PreparedStatement prepareGetNameStatement() throws SQLException {
+ PreparedStatement getNameStatement = this.getNameStatement;
+ if (getNameStatement == null) {
+ String sql;
+ sql = "SELECT n.nspname = ANY(current_schemas(true)), n.nspname, t.typname "
+ + "FROM pg_catalog.pg_type t "
+ + "JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid WHERE t.oid = ?";
+
+ this.getNameStatement = getNameStatement = conn.prepareStatement(sql);
+ }
+ return getNameStatement;
+ }
+
+ @Override
+ public int getPGArrayType(String elementTypeName) throws SQLException {
+ elementTypeName = getTypeForAlias(elementTypeName);
+ return getPGType(elementTypeName + "[]");
+ }
+
+ /**
+ * Return the oid of the array's base element if it's an array, if not return the provided oid.
+ * This doesn't do any database lookups, so it's only useful for the originally provided type
+   * mappings. This is fine for its intended uses, where we only have intimate knowledge of types
+ * that are already known to the driver.
+ *
+ * @param oid input oid
+ * @return oid of the array's base element or the provided oid (if not array)
+ */
+ protected int convertArrayToBaseOid(int oid) {
+ try (ResourceLock ignore = lock.obtain()) {
+ Integer i = pgArrayToPgType.get(oid);
+ if (i == null) {
+ return oid;
+ }
+ return i;
+ }
+ }
+
+ @Override
+ public char getArrayDelimiter(int oid) throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ if (oid == Oid.UNSPECIFIED) {
+ return ',';
+ }
+
+ Character delim = arrayOidToDelimiter.get(oid);
+ if (delim != null) {
+ return delim;
+ }
+
+ PreparedStatement getArrayDelimiterStatement = prepareGetArrayDelimiterStatement();
+
+ getArrayDelimiterStatement.setInt(1, oid);
+
+ // Go through BaseStatement to avoid transaction start.
+ if (!((BaseStatement) getArrayDelimiterStatement)
+ .executeWithFlags(QueryExecutor.QUERY_SUPPRESS_BEGIN)) {
+ throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA);
+ }
+
+ ResultSet rs = getArrayDelimiterStatement.getResultSet();
+ if (!rs.next()) {
+ throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA);
+ }
+
+ String s = rs.getString(1);
+ delim = s.charAt(0);
+
+ arrayOidToDelimiter.put(oid, delim);
+
+ rs.close();
+
+ return delim;
+ }
+ }
+
+ private PreparedStatement prepareGetArrayDelimiterStatement() throws SQLException {
+ PreparedStatement getArrayDelimiterStatement = this.getArrayDelimiterStatement;
+ if (getArrayDelimiterStatement == null) {
+ String sql;
+ sql = "SELECT e.typdelim FROM pg_catalog.pg_type t, pg_catalog.pg_type e "
+ + "WHERE t.oid = ? and t.typelem = e.oid";
+ this.getArrayDelimiterStatement = getArrayDelimiterStatement = conn.prepareStatement(sql);
+ }
+ return getArrayDelimiterStatement;
+ }
+
+ @Override
+ public int getPGArrayElement(int oid) throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ if (oid == Oid.UNSPECIFIED) {
+ return Oid.UNSPECIFIED;
+ }
+
+ Integer pgType = pgArrayToPgType.get(oid);
+
+ if (pgType != null) {
+ return pgType;
+ }
+
+ PreparedStatement getArrayElementOidStatement = prepareGetArrayElementOidStatement();
+
+ getArrayElementOidStatement.setInt(1, oid);
+
+ // Go through BaseStatement to avoid transaction start.
+ if (!((BaseStatement) getArrayElementOidStatement)
+ .executeWithFlags(QueryExecutor.QUERY_SUPPRESS_BEGIN)) {
+ throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA);
+ }
+
+ ResultSet rs = getArrayElementOidStatement.getResultSet();
+ if (!rs.next()) {
+ throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA);
+ }
+
+ pgType = (int) rs.getLong(1);
+ boolean onPath = rs.getBoolean(2);
+ String schema = rs.getString(3);
+ String name = rs.getString(4);
+ pgArrayToPgType.put(oid, pgType);
+ pgNameToOid.put(schema + "." + name, pgType);
+ String fullName = "\"" + schema + "\".\"" + name + "\"";
+ pgNameToOid.put(fullName, pgType);
+ if (onPath && name.equals(name.toLowerCase(Locale.ROOT))) {
+ oidToPgName.put(pgType, name);
+ pgNameToOid.put(name, pgType);
+ } else {
+ oidToPgName.put(pgType, fullName);
+ }
+
+ rs.close();
+
+ return pgType;
+ }
+ }
+
+ private PreparedStatement prepareGetArrayElementOidStatement() throws SQLException {
+ PreparedStatement getArrayElementOidStatement = this.getArrayElementOidStatement;
+ if (getArrayElementOidStatement == null) {
+ String sql;
+ sql = "SELECT e.oid, n.nspname = ANY(current_schemas(true)), n.nspname, e.typname "
+ + "FROM pg_catalog.pg_type t JOIN pg_catalog.pg_type e ON t.typelem = e.oid "
+ + "JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid WHERE t.oid = ?";
+ this.getArrayElementOidStatement = getArrayElementOidStatement = conn.prepareStatement(sql);
+ }
+ return getArrayElementOidStatement;
+ }
+
+ @Override
+  public Class<? extends PGobject> getPGobject(String type) {
+ try (ResourceLock ignore = lock.obtain()) {
+ return pgNameToPgObject.get(type);
+ }
+ }
+
+ @Override
+ public String getJavaClass(int oid) throws SQLException {
+ try (ResourceLock ignore = lock.obtain()) {
+ String pgTypeName = getPGType(oid);
+ if (pgTypeName == null) {
+ // Technically speaking, we should not be here
+ // null result probably means oid == UNSPECIFIED which has no clear way
+ // to map to Java
+ return "java.lang.String";
+ }
+
+ String result = pgNameToJavaClass.get(pgTypeName);
+ if (result != null) {
+ return result;
+ }
+
+ if (getSQLType(pgTypeName) == Types.ARRAY) {
+ result = "java.sql.Array";
+ pgNameToJavaClass.put(pgTypeName, result);
+ }
+
+ return result == null ? "java.lang.String" : result;
+ }
+ }
+
+ @Override
+ public String getTypeForAlias(String alias) {
+ if ( alias == null ) {
+ return null;
+ }
+ String type = TYPE_ALIASES.get(alias);
+ if (type != null) {
+ return type;
+ }
+ type = TYPE_ALIASES.get(alias.toLowerCase(Locale.ROOT));
+ if (type == null) {
+ type = alias;
+ }
+ //populate for future use
+ TYPE_ALIASES.put(alias, type);
+ return type;
+ }
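+  // e.g. getTypeForAlias("BIGINT") misses the exact-case lookup, hits "bigint" -> "int8",
+  // and caches the "BIGINT" variant for future calls; unknown aliases are cached as-is.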
+
+ @Override
+ public int getPrecision(int oid, int typmod) {
+ oid = convertArrayToBaseOid(oid);
+ switch (oid) {
+ case Oid.INT2:
+ return 5;
+
+ case Oid.OID:
+ case Oid.INT4:
+ return 10;
+
+ case Oid.INT8:
+ return 19;
+
+ case Oid.FLOAT4:
+ // For float4 and float8, we can normally only get 6 and 15
+ // significant digits out, but extra_float_digits may raise
+ // that number by up to two digits.
+ return 8;
+
+ case Oid.FLOAT8:
+ return 17;
+
+ case Oid.NUMERIC:
+ if (typmod == -1) {
+ return 0;
+ }
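+        // e.g. numeric(10,2) has typmod ((10 << 16) | 2) + 4 == 655366, so this yields 10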
+ return ((typmod - 4) & 0xFFFF0000) >> 16;
+
+ case Oid.CHAR:
+ case Oid.BOOL:
+ return 1;
+
+ case Oid.BPCHAR:
+ case Oid.VARCHAR:
+ if (typmod == -1) {
+ return unknownLength;
+ }
+ return typmod - 4;
+
+ // datetime types get the
+ // "length in characters of the String representation"
+ case Oid.DATE:
+ case Oid.TIME:
+ case Oid.TIMETZ:
+ case Oid.INTERVAL:
+ case Oid.TIMESTAMP:
+ case Oid.TIMESTAMPTZ:
+ return getDisplaySize(oid, typmod);
+
+ case Oid.BIT:
+ return typmod;
+
+ case Oid.VARBIT:
+ if (typmod == -1) {
+ return unknownLength;
+ }
+ return typmod;
+
+ case Oid.TEXT:
+ case Oid.BYTEA:
+ default:
+ return unknownLength;
+ }
+ }
+
+ @Override
+ public int getScale(int oid, int typmod) {
+ oid = convertArrayToBaseOid(oid);
+ switch (oid) {
+ case Oid.FLOAT4:
+ return 8;
+ case Oid.FLOAT8:
+ return 17;
+ case Oid.NUMERIC:
+ if (typmod == -1) {
+ return 0;
+ }
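+        // e.g. numeric(10,2): (655366 - 4) & 0xFFFF == 2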
+ return (typmod - 4) & 0xFFFF;
+ case Oid.TIME:
+ case Oid.TIMETZ:
+ case Oid.TIMESTAMP:
+ case Oid.TIMESTAMPTZ:
+ if (typmod == -1) {
+ return 6;
+ }
+ return typmod;
+ case Oid.INTERVAL:
+ if (typmod == -1) {
+ return 6;
+ }
+ return typmod & 0xFFFF;
+ default:
+ return 0;
+ }
+ }
+
+ @Override
+ public boolean isCaseSensitive(int oid) {
+ oid = convertArrayToBaseOid(oid);
+ switch (oid) {
+ case Oid.OID:
+ case Oid.INT2:
+ case Oid.INT4:
+ case Oid.INT8:
+ case Oid.FLOAT4:
+ case Oid.FLOAT8:
+ case Oid.NUMERIC:
+ case Oid.BOOL:
+ case Oid.BIT:
+ case Oid.VARBIT:
+ case Oid.DATE:
+ case Oid.TIME:
+ case Oid.TIMETZ:
+ case Oid.TIMESTAMP:
+ case Oid.TIMESTAMPTZ:
+ case Oid.INTERVAL:
+ return false;
+ default:
+ return true;
+ }
+ }
+
+ @Override
+ public boolean isSigned(int oid) {
+ oid = convertArrayToBaseOid(oid);
+ switch (oid) {
+ case Oid.INT2:
+ case Oid.INT4:
+ case Oid.INT8:
+ case Oid.FLOAT4:
+ case Oid.FLOAT8:
+ case Oid.NUMERIC:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ @SuppressWarnings("fallthrough")
+ @Override
+ public int getDisplaySize(int oid, int typmod) {
+ oid = convertArrayToBaseOid(oid);
+ switch (oid) {
+ case Oid.INT2:
+ return 6; // -32768 to +32767
+ case Oid.INT4:
+ return 11; // -2147483648 to +2147483647
+ case Oid.OID:
+ return 10; // 0 to 4294967295
+ case Oid.INT8:
+ return 20; // -9223372036854775808 to +9223372036854775807
+ case Oid.FLOAT4:
+ // varies based upon the extra_float_digits GUC.
+ // These values are for the longest possible length.
+ return 15; // sign + 9 digits + decimal point + e + sign + 2 digits
+ case Oid.FLOAT8:
+ return 25; // sign + 18 digits + decimal point + e + sign + 3 digits
+ case Oid.CHAR:
+ return 1;
+ case Oid.BOOL:
+ return 1;
+ case Oid.DATE:
+ return 13; // "4713-01-01 BC" to "01/01/4713 BC" - "31/12/32767"
+ case Oid.TIME:
+ case Oid.TIMETZ:
+ case Oid.TIMESTAMP:
+ case Oid.TIMESTAMPTZ:
+ // Calculate the number of decimal digits + the decimal point.
+ int secondSize;
+ switch (typmod) {
+ case -1:
+ secondSize = 6 + 1;
+ break;
+ case 0:
+ secondSize = 0;
+ break;
+ case 1:
+ // Bizarrely SELECT '0:0:0.1'::time(1); returns 2 digits.
+ secondSize = 2 + 1;
+ break;
+ default:
+ secondSize = typmod + 1;
+ break;
+ }
+
+ // We assume the worst case scenario for all of these.
+ // time = '00:00:00' = 8
+ // date = '5874897-12-31' = 13 (although at large values second precision is lost)
+ // date = '294276-11-20' = 12 --enable-integer-datetimes
+ // zone = '+11:30' = 6;
+
+ switch (oid) {
+ case Oid.TIME:
+ return 8 + secondSize;
+ case Oid.TIMETZ:
+ return 8 + secondSize + 6;
+ case Oid.TIMESTAMP:
+ return 13 + 1 + 8 + secondSize;
+ case Oid.TIMESTAMPTZ:
+ return 13 + 1 + 8 + secondSize + 6;
+ }
+ case Oid.INTERVAL:
+ // SELECT LENGTH('-123456789 years 11 months 33 days 23 hours 10.123456 seconds'::interval);
+ return 49;
+ case Oid.VARCHAR:
+ case Oid.BPCHAR:
+ if (typmod == -1) {
+ return unknownLength;
+ }
+ return typmod - 4;
+ case Oid.NUMERIC:
+ if (typmod == -1) {
+ return 131089; // SELECT LENGTH(pow(10::numeric,131071)); 131071 = 2^17-1
+ }
+ int precision = (typmod - 4 >> 16) & 0xffff;
+ int scale = (typmod - 4) & 0xffff;
+ // sign + digits + decimal point (only if we have nonzero scale)
+ return 1 + precision + (scale != 0 ? 1 : 0);
+ case Oid.BIT:
+ return typmod;
+ case Oid.VARBIT:
+ if (typmod == -1) {
+ return unknownLength;
+ }
+ return typmod;
+ case Oid.TEXT:
+ case Oid.BYTEA:
+ return unknownLength;
+ default:
+ return unknownLength;
+ }
+ }
+
+ @Override
+ public int getMaximumPrecision(int oid) {
+ oid = convertArrayToBaseOid(oid);
+ switch (oid) {
+ case Oid.NUMERIC:
+ return 1000;
+ case Oid.TIME:
+ case Oid.TIMETZ:
+ // Technically this depends on the --enable-integer-datetimes
+ // configure setting. It is 6 with integer and 10 with float.
+ return 6;
+ case Oid.TIMESTAMP:
+ case Oid.TIMESTAMPTZ:
+ case Oid.INTERVAL:
+ return 6;
+ case Oid.BPCHAR:
+ case Oid.VARCHAR:
+ return 10485760;
+ case Oid.BIT:
+ case Oid.VARBIT:
+ return 83886080;
+ default:
+ return 0;
+ }
+ }
+
+ @Override
+ public boolean requiresQuoting(int oid) throws SQLException {
+ int sqlType = getSQLType(oid);
+ return requiresQuotingSqlType(sqlType);
+ }
+
+ /**
+ * Returns true if particular sqlType requires quoting.
+ * This method is used internally by the driver, so it might disappear without notice.
+ *
+ * @param sqlType sql type as in java.sql.Types
+ * @return true if the type requires quoting
+ * @throws SQLException if something goes wrong
+ */
+ @Override
+ public boolean requiresQuotingSqlType(int sqlType) throws SQLException {
+ switch (sqlType) {
+ case Types.BIGINT:
+ case Types.DOUBLE:
+ case Types.FLOAT:
+ case Types.INTEGER:
+ case Types.REAL:
+ case Types.SMALLINT:
+ case Types.TINYINT:
+ case Types.NUMERIC:
+ case Types.DECIMAL:
+ return false;
+ }
+ return true;
+ }
+
+ @Override
+ public int longOidToInt(long oid) throws SQLException {
+ if ((oid & 0xFFFF_FFFF_0000_0000L) != 0) {
+ throw new PSQLException(GT.tr("Value is not an OID: {0}", oid), PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
+ }
+
+ return (int) oid;
+ }
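+  // Note the unsigned mapping (illustrative): longOidToInt(4294967295L) == -1 and
+  // intOidToLong(-1) == 4294967295L; anything above 2^32 - 1 is rejected above.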
+
+ @Override
+ public long intOidToLong(int oid) {
+ return ((long) oid) & 0xFFFFFFFFL;
+ }
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/UUIDArrayAssistant.java b/pgjdbc/src/main/java/org/postgresql/jdbc/UUIDArrayAssistant.java
new file mode 100644
index 0000000..c6376c9
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/UUIDArrayAssistant.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2004, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.jdbc;
+
+import org.postgresql.jdbc2.ArrayAssistant;
+import org.postgresql.util.ByteConverter;
+
+import java.util.UUID;
+
+public class UUIDArrayAssistant implements ArrayAssistant {
+
+ public UUIDArrayAssistant() {
+ }
+
+ @Override
+  public Class<?> baseType() {
+ return UUID.class;
+ }
+
+ @Override
+ public Object buildElement(byte[] bytes, int pos, int len) {
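+    // the 16-byte binary uuid holds two big-endian int8 halves: most significant first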
+ return new UUID(ByteConverter.int8(bytes, pos + 0), ByteConverter.int8(bytes, pos + 8));
+ }
+
+ @Override
+ public Object buildElement(String literal) {
+ return UUID.fromString(literal);
+ }
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc2/ArrayAssistant.java b/pgjdbc/src/main/java/org/postgresql/jdbc2/ArrayAssistant.java
new file mode 100644
index 0000000..cc57ac8
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc2/ArrayAssistant.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2004, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.jdbc2;
+
+/**
+ * Implement this interface and register its instance with ArrayAssistantRegistry to let the
+ * PostgreSQL driver support more array types.
+ *
+ * @author Minglei Tu
+ */
+public interface ArrayAssistant {
+ /**
+ * get array base type.
+ *
+ * @return array base type
+ */
+  Class<?> baseType();
+
+ /**
+   * build an array element from its binary bytes.
+ *
+ * @param bytes input bytes
+ * @param pos position in input array
+ * @param len length of the element
+ * @return array element from its binary bytes
+ */
+ Object buildElement(byte[] bytes, int pos, int len);
+
+ /**
+ * build an array element from its literal string.
+ *
+ * @param literal string representation of array element
+ * @return array element
+ */
+ Object buildElement(String literal);
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc2/ArrayAssistantRegistry.java b/pgjdbc/src/main/java/org/postgresql/jdbc2/ArrayAssistantRegistry.java
new file mode 100644
index 0000000..59a97f9
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc2/ArrayAssistantRegistry.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2004, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.jdbc2;
+
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+/**
+ * Array assistants register here.
+ *
+ * @author Minglei Tu
+ */
+public class ArrayAssistantRegistry {
+  private static final ConcurrentMap<Integer, ArrayAssistant> ARRAY_ASSISTANT_MAP =
+ new ConcurrentHashMap<>();
+
+ public ArrayAssistantRegistry() {
+ }
+
+ public static ArrayAssistant getAssistant(int oid) {
+ return ARRAY_ASSISTANT_MAP.get(oid);
+ }
+
+ public static void register(int oid, ArrayAssistant assistant) {
+ ARRAY_ASSISTANT_MAP.put(oid, assistant);
+ }
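+  // Usage sketch (assuming the Oid.UUID constant): assistants are keyed by element oid, e.g.
+  //   ArrayAssistantRegistry.register(Oid.UUID, new UUIDArrayAssistant());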
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc2/optional/ConnectionPool.java b/pgjdbc/src/main/java/org/postgresql/jdbc2/optional/ConnectionPool.java
new file mode 100644
index 0000000..f1afca2
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc2/optional/ConnectionPool.java
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2004, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.jdbc2.optional;
+
+import org.postgresql.ds.PGConnectionPoolDataSource;
+
+/**
+ * @deprecated Please use {@link PGConnectionPoolDataSource}
+ */
+@SuppressWarnings("serial")
+@Deprecated
+public class ConnectionPool extends PGConnectionPoolDataSource {
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc2/optional/PoolingDataSource.java b/pgjdbc/src/main/java/org/postgresql/jdbc2/optional/PoolingDataSource.java
new file mode 100644
index 0000000..f47d373
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc2/optional/PoolingDataSource.java
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2004, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.jdbc2.optional;
+
+import org.postgresql.ds.PGPoolingDataSource;
+
+/**
+ * @deprecated Since 42.0.0, see {@link PGPoolingDataSource}
+ */
+@Deprecated
+public class PoolingDataSource extends PGPoolingDataSource {
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc2/optional/SimpleDataSource.java b/pgjdbc/src/main/java/org/postgresql/jdbc2/optional/SimpleDataSource.java
new file mode 100644
index 0000000..e907c07
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc2/optional/SimpleDataSource.java
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2004, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.jdbc2.optional;
+
+import org.postgresql.ds.PGSimpleDataSource;
+
+/**
+ * @deprecated Please use {@link PGSimpleDataSource}
+ */
+@SuppressWarnings("serial")
+@Deprecated
+public class SimpleDataSource extends PGSimpleDataSource {
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc3/Jdbc3ConnectionPool.java b/pgjdbc/src/main/java/org/postgresql/jdbc3/Jdbc3ConnectionPool.java
new file mode 100644
index 0000000..0a56657
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc3/Jdbc3ConnectionPool.java
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2004, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.jdbc3;
+
+import org.postgresql.ds.PGConnectionPoolDataSource;
+
+/**
+ * @deprecated Please use {@link PGConnectionPoolDataSource}
+ */
+@SuppressWarnings("serial")
+@Deprecated
+public class Jdbc3ConnectionPool extends PGConnectionPoolDataSource {
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc3/Jdbc3PoolingDataSource.java b/pgjdbc/src/main/java/org/postgresql/jdbc3/Jdbc3PoolingDataSource.java
new file mode 100644
index 0000000..10b1920
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc3/Jdbc3PoolingDataSource.java
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2004, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.jdbc3;
+
+import org.postgresql.ds.PGPoolingDataSource;
+
+/**
+ * @deprecated Since 42.0.0, see {@link PGPoolingDataSource}
+ */
+@Deprecated
+public class Jdbc3PoolingDataSource extends PGPoolingDataSource {
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc3/Jdbc3SimpleDataSource.java b/pgjdbc/src/main/java/org/postgresql/jdbc3/Jdbc3SimpleDataSource.java
new file mode 100644
index 0000000..b61c8b0
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc3/Jdbc3SimpleDataSource.java
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2004, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.jdbc3;
+
+import org.postgresql.ds.PGSimpleDataSource;
+
+/**
+ * @deprecated Please use {@link PGSimpleDataSource}
+ */
+@SuppressWarnings("serial")
+@Deprecated
+public class Jdbc3SimpleDataSource extends PGSimpleDataSource {
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbcurlresolver/PgPassParser.java b/pgjdbc/src/main/java/org/postgresql/jdbcurlresolver/PgPassParser.java
new file mode 100644
index 0000000..0ca6f05
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/jdbcurlresolver/PgPassParser.java
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2021, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.jdbcurlresolver;
+
+import org.postgresql.PGEnvironment;
+import org.postgresql.util.OSUtil;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.Reader;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+/**
+ * Helps to read the password file (.pgpass).
+ * https://www.postgresql.org/docs/current/libpq-pgpass.html
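+ *
+ * <p>A sketch of the expected file format (values are illustrative): each line is
+ * {@code hostname:port:database:username:password}, and any of the first four fields may be
+ * {@code *} to match anything:</p>
+ * <pre>
+ * # hostname:port:database:username:password
+ * localhost:5432:mydb:myuser:topsecret
+ * *:*:*:admin:adminpass
+ * </pre>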
+ */
+public class PgPassParser {
+
+ private static final Logger LOGGER = Logger.getLogger(PgPassParser.class.getName());
+ private static final char SEPARATOR = ':';
+ //
+ private final String hostname;
+ private final String port;
+ private final String database;
+ private final String user;
+
+ //
+ private PgPassParser(String hostname, String port, String database, String user) {
+ this.hostname = hostname;
+ this.port = port;
+ this.database = database;
+ this.user = user;
+ }
+
+ /**
+ * Read .pgpass resource
+ *
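+ * <p>Example call (hypothetical values):
+ * {@code String password = PgPassParser.getPassword("localhost", "5432", "mydb", "myuser");}</p>
+ *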
+ * @param hostname hostname or *
+ * @param port port or *
+ * @param database database or *
+ * @param user username or *
+ * @return password or null
+ */
+ public static String getPassword(String hostname, String port, String database, String user) {
+ if (hostname == null || hostname.isEmpty()) {
+ return null;
+ }
+ if (port == null || port.isEmpty()) {
+ return null;
+ }
+ if (database == null || database.isEmpty()) {
+ return null;
+ }
+ if (user == null || user.isEmpty()) {
+ return null;
+ }
+ PgPassParser pgPassParser = new PgPassParser(hostname, port, database, user);
+ return pgPassParser.findPassword();
+ }
+
+ private String findPassword() {
+ String resourceName = findPgPasswordResourceName();
+ if (resourceName == null) {
+ return null;
+ }
+ //
+ String result = null;
+ try (InputStream inputStream = openInputStream(resourceName)) {
+ result = parseInputStream(inputStream);
+ } catch (IOException e) {
+ LOGGER.log(Level.FINE, "Failed to handle resource [{0}] with error [{1}]", new Object[]{resourceName, e.getMessage()});
+ }
+ //
+ return result;
+ }
+
+ // open URL or File
+ private InputStream openInputStream(String resourceName) throws IOException {
+ try {
+ URL url = URI.create(resourceName).toURL();
+ return url.openStream();
+ } catch (MalformedURLException ex) {
+ // try file
+ File file = new File(resourceName);
+ return new FileInputStream(file);
+ }
+ }
+
+ // choose resource where to search for service description
+ private String findPgPasswordResourceName() {
+ // default file name
+ String pgPassFileDefaultName = PGEnvironment.PGPASSFILE.getDefaultValue();
+
+ // if there is value, use it - 1st priority
+ {
+ String propertyName = PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName();
+ String resourceName = System.getProperty(propertyName);
+ if (resourceName != null && !resourceName.trim().isEmpty()) {
+ LOGGER.log(Level.FINE, "Value [{0}] selected from property [{1}]", new Object[]{resourceName, propertyName});
+ return resourceName;
+ }
+ }
+
+ // if there is value, use it - 2nd priority
+ {
+ String envVariableName = PGEnvironment.PGPASSFILE.getName();
+ String resourceName = System.getenv().get(envVariableName);
+ if (resourceName != null && !resourceName.trim().isEmpty()) {
+ LOGGER.log(Level.FINE, "Value [{0}] selected from environment variable [{1}]", new Object[]{resourceName, envVariableName});
+ return resourceName;
+ }
+ }
+
+ // if file in user home is readable, use it, otherwise continue - 3rd priority
+ {
+ String resourceName = "";
+ if (!OSUtil.isWindows()) {
+ resourceName += ".";
+ }
+ resourceName += pgPassFileDefaultName;
+ if (OSUtil.isWindows()) {
+ resourceName += ".conf";
+ }
+ File resourceFile = new File(OSUtil.getUserConfigRootDirectory(), resourceName);
+ if (resourceFile.canRead()) {
+ LOGGER.log(Level.FINE, "Value [{0}] selected because file exist in user home directory", new Object[]{resourceFile.getAbsolutePath()});
+ return resourceFile.getAbsolutePath();
+ }
+ }
+
+ // otherwise null
+ LOGGER.log(Level.FINE, "Value for resource [{0}] not found", pgPassFileDefaultName);
+ return null;
+ }
+
+ //
+ private String parseInputStream(InputStream inputStream) throws IOException {
+ //
+ String result = null;
+ try (
+ Reader reader = new InputStreamReader(inputStream, StandardCharsets.UTF_8);
+ BufferedReader br = new BufferedReader(reader)) {
+ //
+ String line;
+ int currentLine = 0;
+ while ((line = br.readLine()) != null) {
+ currentLine++;
+ if (line.trim().isEmpty()) {
+ // skip empty lines
+ continue;
+ } else if (line.startsWith("#")) {
+ // skip lines with comments
+ continue;
+ }
+ // analyze line, accept first matching line
+ result = evaluateLine(line, currentLine);
+ if (result != null) {
+ break;
+ }
+ }
+ }
+ //
+ return result;
+ }
+
+ //
+ private String evaluateLine(String fullLine, int currentLine) {
+ String line = fullLine;
+ String result = null;
+ // check match
+ if ((line = checkForPattern(line, hostname)) != null
+ && (line = checkForPattern(line, port)) != null
+ && (line = checkForPattern(line, database)) != null
+ && (line = checkForPattern(line, user)) != null) {
+ // use remaining line to get password
+ result = extractPassword(line);
+ String lineWithoutPassword = fullLine.substring(0, fullLine.length() - line.length());
+ LOGGER.log(Level.FINE, "Matching line number [{0}] with value prefix [{1}] found for input [{2}:{3}:{4}:{5}]",
+ new Object[]{currentLine, lineWithoutPassword, hostname, port, database, user});
+ }
+ //
+ return result;
+ }
+
+ //
+ private String extractPassword(String line) {
+ StringBuilder sb = new StringBuilder();
+ // take all characters up to the separator (a colon)
+ // remove escaping of backslash and colon ("\\" -> "\" ; "\:" -> ":")
+ // a lone backslash is not considered an error ("\a" -> "\a")
+ for (int i = 0; i < line.length(); i++) {
+ char chr = line.charAt(i);
+ if (chr == '\\' && (i + 1) < line.length()) {
+ char nextChr = line.charAt(i + 1);
+ if (nextChr == '\\' || nextChr == SEPARATOR) {
+ chr = nextChr;
+ i++;
+ }
+ } else if (chr == SEPARATOR) {
+ break;
+ }
+ sb.append(chr);
+ }
+ return sb.toString();
+ }
+
+ //
+ private String checkForPattern(String line, String value) {
+ String result = null;
+ if (line.startsWith("*:")) {
+ // any value match
+ result = line.substring(2);
+ } else {
+ int lPos = 0;
+ // Why not split by the separator (:) and compare element by element?
+ // IPv6 makes it tricky: an IPv6 address may contain a varying number of colons. Comparing
+ // char by char also maintains compatibility with libpq.
+ // Compare the beginning of the line with the value char by char.
+ // The line may contain escaped values; the value itself carries no escaping.
+ // Escaping in the line is not mandatory. These are considered equal: "ab\cd:ef" == "ab\\cd\:ef" == "ab\cd\:ef" == "ab\\cd:ef"
+ for (int vPos = 0; vPos < value.length(); vPos++) {
+ if (lPos >= line.length()) {
+ return null;
+ }
+ char l = line.charAt(lPos);
+ if (l == '\\') {
+ if ((lPos + 1) >= line.length()) {
+ return null;
+ }
+ char next = line.charAt(lPos + 1);
+ if (next == '\\' || next == SEPARATOR) {
+ l = next;
+ lPos++;
+ }
+ }
+ lPos++;
+ char v = value.charAt(vPos);
+ if (l != v) {
+ return null;
+ }
+ }
+ // guard against lines that end exactly at the matched value with no trailing separator
+ if (lPos < line.length() && line.charAt(lPos) == SEPARATOR) {
+ result = line.substring(lPos + 1);
+ }
+ }
+ return result;
+ }
+
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbcurlresolver/PgServiceConfParser.java b/pgjdbc/src/main/java/org/postgresql/jdbcurlresolver/PgServiceConfParser.java
new file mode 100644
index 0000000..7bca1b1
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/jdbcurlresolver/PgServiceConfParser.java
@@ -0,0 +1,258 @@
+/*
+ * Copyright (c) 2021, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.jdbcurlresolver;
+
+import org.postgresql.PGEnvironment;
+import org.postgresql.PGProperty;
+import org.postgresql.util.OSUtil;
+import org.postgresql.util.PGPropertyUtil;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.Reader;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.util.Arrays;
+import java.util.Properties;
+import java.util.Set;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+import java.util.stream.Collectors;
+
+/**
+ * Helps to read the connection service file (pg_service.conf).
+ * https://www.postgresql.org/docs/current/libpq-pgservice.html
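+ *
+ * <p>A sketch of the expected file format (service name and values are illustrative):</p>
+ * <pre>
+ * # a comment
+ * [mydb-service]
+ * host=localhost
+ * port=5432
+ * dbname=mydb
+ * user=myuser
+ * </pre>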
+ */
+public class PgServiceConfParser {
+
+ private static final Logger LOGGER = Logger.getLogger(PgServiceConfParser.class.getName());
+ private final String serviceName;
+ private boolean ignoreIfOpenFails = true;
+
+ private PgServiceConfParser(String serviceName) {
+ this.serviceName = serviceName;
+ }
+
+ /**
+ * Read pg_service.conf resource
+ *
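+ * <p>Example call (hypothetical service name):
+ * {@code Properties props = PgServiceConfParser.getServiceProperties("mydb-service");}</p>
+ *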
+ * @param serviceName service name to search for
+ * @return key value pairs, or null when the service cannot be found or the file is invalid
+ */
+ public static Properties getServiceProperties(String serviceName) {
+ PgServiceConfParser pgServiceConfParser = new PgServiceConfParser(serviceName);
+ return pgServiceConfParser.findServiceDescription();
+ }
+
+ private Properties findServiceDescription() {
+ String resourceName = findPgServiceConfResourceName();
+ if (resourceName == null) {
+ return null;
+ }
+ //
+ Properties result = null;
+ try (InputStream inputStream = openInputStream(resourceName)) {
+ result = parseInputStream(inputStream);
+ } catch (IOException e) {
+ Level level = ignoreIfOpenFails ? Level.FINE : Level.WARNING;
+ LOGGER.log(level, "Failed to handle resource [{0}] with error [{1}]", new Object[]{resourceName, e.getMessage()});
+ }
+ //
+ return result;
+ }
+
+ // open URL or File
+ private InputStream openInputStream(String resourceName) throws IOException {
+ try {
+ URL url = URI.create(resourceName).toURL();
+ return url.openStream();
+ } catch (MalformedURLException ex) {
+ // try file
+ File file = new File(resourceName);
+ return new FileInputStream(file);
+ }
+ }
+
+ // choose resource where to search for service description
+ private String findPgServiceConfResourceName() {
+ // default file name
+ String pgServiceConfFileDefaultName = PGEnvironment.PGSERVICEFILE.getDefaultValue();
+
+ // if there is value, use it - 1st priority
+ {
+ String propertyName = PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName();
+ String resourceName = System.getProperty(propertyName);
+ if (resourceName != null && !resourceName.trim().isEmpty()) {
+ this.ignoreIfOpenFails = false;
+ LOGGER.log(Level.FINE, "Value [{0}] selected from property [{1}]",
+ new Object[]{resourceName, propertyName});
+ return resourceName;
+ }
+ }
+
+ // if there is value, use it - 2nd priority
+ {
+ String envVariableName = PGEnvironment.PGSERVICEFILE.getName();
+ String resourceName = System.getenv().get(envVariableName);
+ if (resourceName != null && !resourceName.trim().isEmpty()) {
+ this.ignoreIfOpenFails = false;
+ LOGGER.log(Level.FINE, "Value [{0}] selected from environment variable [{1}]",
+ new Object[]{resourceName, envVariableName});
+ return resourceName;
+ }
+ }
+
+ /*
+ if the file in the user home directory is readable, use it, otherwise continue - 3rd priority
+ (when located in the user home directory, the file name is prefixed with '.')
+ */
+ {
+ String resourceName = "." + pgServiceConfFileDefaultName;
+ File resourceFile = new File(OSUtil.getUserConfigRootDirectory(), resourceName);
+ if (resourceFile.canRead()) {
+ LOGGER.log(Level.FINE, "Value [{0}] selected because file exist in user home directory", new Object[]{resourceFile.getAbsolutePath()});
+ return resourceFile.getAbsolutePath();
+ }
+ }
+
+ // if there is value, use it - 4th priority
+ {
+ String envVariableName = PGEnvironment.PGSYSCONFDIR.getName();
+ String pgSysconfDir = System.getenv().get(envVariableName);
+ if (pgSysconfDir != null && !pgSysconfDir.trim().isEmpty()) {
+ String resourceName = pgSysconfDir + File.separator + pgServiceConfFileDefaultName;
+ LOGGER.log(Level.FINE, "Value [{0}] selected using environment variable [{1}]", new Object[]{resourceName, envVariableName});
+ return resourceName;
+ }
+ }
+ // otherwise null
+ LOGGER.log(Level.FINE, "Value for resource [{0}] not found", pgServiceConfFileDefaultName);
+ return null;
+ }
+
+ /*
+ # Requirements for stream handling (must match libpq behaviour)
+ #
+ # space around line is removed
+ # Line: " host=my-host "
+ # equal to : "host=my-host"
+ # keys are case sensitive
+ # Line: "host=my-host"
+ # not equal to : "HOST=my-host"
+ # keys are limited with values described in enum PGEnvironment field name
+ # key is invalid: "my-host=my-host"
+ # unexpected keys produce error
+ # Example: "my-host=my-host"
+ # Example: "HOST=my-host"
+ # space before equal sign becomes part of key
+ # Line: "host =my-host"
+ # key equals: "host "
+ # space after equal sign becomes part of value
+ # Line: "host= my-host"
+ # key equals: " my-host"
+ # in case of duplicate section - first entry counts
+ # Line: "[service-one]"
+ # Line: "host=host-one"
+ # Line: "[service-two]"
+ # Line: "host=host-two"
+ # --> section-one is selected
+ # in case of duplicate key - first entry counts
+ # Line: "[service-one]"
+ # Line: "host=host-one"
+ # Line: "host=host-two"
+ # --> host-one is selected
+ # service name is case sensitive
+ # Line: "[service-one]"
+ # Line: "[service-ONE]"
+ # --> these are unique service names
+ # whatever is between brackets is considered the service name (including spaces)
+ # Line: "[ service-ONE]"
+ # Line: "[service-ONE ]"
+ # Line: "[service ONE]"
+ # --> these are unique service names
+ */
+ private Properties parseInputStream(InputStream inputStream) throws IOException {
+ // build set of allowed keys
+ Set<String> allowedServiceKeys = Arrays.stream(PGProperty.values())
+ .map(PGProperty::getName)
+ .map(PGPropertyUtil::translatePGPropertyToPGService)
+ .collect(Collectors.toSet());
+
+ //
+ Properties result = new Properties();
+ boolean isFound = false;
+ try (
+ Reader reader = new InputStreamReader(inputStream, StandardCharsets.UTF_8);
+ BufferedReader br = new BufferedReader(reader)) {
+ //
+ String originalLine;
+ String line;
+ int lineNumber = 0;
+ while ((originalLine = br.readLine()) != null) {
+ lineNumber++;
+ // remove spaces around it
+ line = originalLine.trim();
+ // skip if empty line or starts with comment sign
+ if (line.isEmpty() || line.startsWith("#")) {
+ continue;
+ }
+ // find first equal sign
+ int indexOfEqualSign = line.indexOf("=");
+ // is it section start?
+ if (line.startsWith("[") && line.endsWith("]")) {
+ // stop processing if section with correct name was found already
+ if (isFound) {
+ break;
+ }
+ // get name of section
+ String sectionName = line.substring(1, line.length() - 1);
+ // if match then mark it as section is found
+ if (serviceName.equals(sectionName)) {
+ isFound = true;
+ }
+ } else if (!isFound) {
+ // skip further processing until section is found
+ continue;
+ } else if (indexOfEqualSign > 1) {
+ // get key and value
+ String key = line.substring(0, indexOfEqualSign);
+ String value = line.substring(indexOfEqualSign + 1);
+ // check key against set of allowed keys
+ if (!allowedServiceKeys.contains(key)) {
+ // log list of allowed keys
+ String allowedValuesCommaSeparated =
+ allowedServiceKeys.stream().sorted().collect(Collectors.joining(","));
+ LOGGER.log(Level.SEVERE, "Got invalid key: line number [{0}], value [{1}], allowed "
+ + "values [{2}]",
+ new Object[]{lineNumber, originalLine, allowedValuesCommaSeparated});
+ // stop processing because of invalid key
+ return null;
+ }
+ // ignore line if value is missing
+ if (!value.isEmpty()) {
+ // ignore line having duplicate key, otherwise store key-value pair
+ result.putIfAbsent(PGPropertyUtil.translatePGServiceToPGProperty(key), value);
+ }
+ } else {
+ // if not equal sign then stop processing because of invalid syntax
+ LOGGER.log(Level.WARNING, "Not valid line: line number [{0}], value [{1}]",
+ new Object[]{lineNumber, originalLine});
+ return null;
+ }
+ }
+ }
+ // null means failure - service is not found
+ return isFound ? result : null;
+ }
+
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/largeobject/BlobInputStream.java b/pgjdbc/src/main/java/org/postgresql/largeobject/BlobInputStream.java
new file mode 100644
index 0000000..15beab6
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/largeobject/BlobInputStream.java
@@ -0,0 +1,343 @@
+/*
+ * Copyright (c) 2003, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.largeobject;
+
+import org.postgresql.jdbc.ResourceLock;
+import org.postgresql.util.GT;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.sql.SQLException;
+
+/**
+ * This is an implementation of an InputStream from a large object.
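+ *
+ * <p>A usage sketch (assumes an open LargeObject {@code lo}; error handling omitted):</p>
+ * <pre>
+ * try (InputStream is = lo.getInputStream()) {
+ *   int b;
+ *   while ((b = is.read()) != -1) {
+ *     // process one byte at a time
+ *   }
+ * }
+ * </pre>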
+ */
+@SuppressWarnings("try")
+public class BlobInputStream extends InputStream {
+ static final int DEFAULT_MAX_BUFFER_SIZE = 512 * 1024;
+ static final int INITIAL_BUFFER_SIZE = 64 * 1024;
+
+ /**
+ * The parent LargeObject.
+ */
+ private LargeObject lo;
+ private final ResourceLock lock = new ResourceLock();
+
+ /**
+ * The absolute position.
+ */
+ private long absolutePosition;
+
+ /**
+ * Buffer used to improve performance.
+ */
+ private byte[] buffer;
+
+ /**
+ * Position within buffer.
+ */
+ private int bufferPosition;
+
+ /**
+ * The number of bytes to read on the next read.
+ * Currently, we nullify {@link #buffer}, so we can't use {@code buffer.length}.
+ */
+ private int lastBufferSize;
+
+ /**
+ * The buffer size.
+ */
+ private final int maxBufferSize;
+
+ /**
+ * The mark position.
+ */
+ private long markPosition;
+
+ /**
+ * The limit.
+ */
+ private final long limit;
+
+ /**
+ * @param lo LargeObject to read from
+ */
+ public BlobInputStream(LargeObject lo) {
+ this(lo, DEFAULT_MAX_BUFFER_SIZE);
+ }
+
+ /**
+ * @param lo LargeObject to read from
+ * @param bsize buffer size
+ */
+ public BlobInputStream(LargeObject lo, int bsize) {
+ this(lo, bsize, Long.MAX_VALUE);
+ }
+
+ /**
+ * @param lo LargeObject to read from
+ * @param bsize buffer size
+ * @param limit max number of bytes to read
+ */
+ public BlobInputStream(LargeObject lo, int bsize, long limit) {
+ this.lo = lo;
+ this.maxBufferSize = bsize;
+ // The very first read multiplies the last buffer size by two, so we divide by two to get
+ // the first read to be exactly the initial buffer size
+ this.lastBufferSize = INITIAL_BUFFER_SIZE / 2;
+ // Treat -1 as no limit for backward compatibility
+ this.limit = limit == -1 ? Long.MAX_VALUE : limit;
+ }
+
+ /**
+ * The minimum required to implement input stream.
+ */
+ @Override
+ public int read() throws IOException {
+ try (ResourceLock ignore = lock.obtain()) {
+ LargeObject lo = getLo();
+ if (absolutePosition >= limit) {
+ buffer = null;
+ bufferPosition = 0;
+ return -1;
+ }
+ // read more in if necessary
+ if (buffer == null || bufferPosition >= buffer.length) {
+ // Don't hold the buffer while waiting for DB to respond
+ // Note: lo.read(...) does not support "fetching the response into the user-provided buffer"
+ // See https://github.com/pgjdbc/pgjdbc/issues/3043
+ int nextBufferSize = getNextBufferSize(1);
+ buffer = lo.read(nextBufferSize);
+ bufferPosition = 0;
+
+ if (buffer.length == 0) {
+ // The lob does not produce any more data, so we are at the end of the stream
+ return -1;
+ }
+ }
+
+ int ret = buffer[bufferPosition] & 0xFF;
+
+ bufferPosition++;
+ absolutePosition++;
+ if (bufferPosition >= buffer.length) {
+ // TODO: support buffer reuse in mark/reset
+ buffer = null;
+ bufferPosition = 0;
+ }
+
+ return ret;
+ } catch (SQLException e) {
+ long loId = lo == null ? -1 : lo.getLongOID();
+ throw new IOException(
+ GT.tr("Can not read data from large object {0}, position: {1}, buffer size: {2}",
+ loId, absolutePosition, lastBufferSize),
+ e);
+ }
+ }
+
+ /**
+ * Computes the next buffer size to use for reading data from the large object.
+ * The idea is to avoid allocating too much memory, especially if the user will use just a few
+ * bytes of the data.
+ * @param len estimated read request
+ * @return next buffer size or {@link #maxBufferSize} if the buffer should not be increased
+ */
+ private int getNextBufferSize(int len) {
+ int nextBufferSize = Math.min(maxBufferSize, this.lastBufferSize * 2);
+ if (len > nextBufferSize) {
+ nextBufferSize = Math.min(maxBufferSize, Integer.highestOneBit(len * 2));
+ }
+ this.lastBufferSize = nextBufferSize;
+ return nextBufferSize;
+ }
+
+ @Override
+ public int read(byte[] dest, int off, int len) throws IOException {
+ if (len == 0) {
+ return 0;
+ }
+ try (ResourceLock ignore = lock.obtain()) {
+ int bytesCopied = 0;
+ LargeObject lo = getLo();
+
+ // Check to make sure we aren't at the limit.
+ if (absolutePosition >= limit) {
+ return -1;
+ }
+
+ // Check to make sure we are not going to read past the limit
+ len = Math.min(len, (int) Math.min(limit - absolutePosition, Integer.MAX_VALUE));
+
+ // have we read anything into the buffer
+ if (buffer != null) {
+ // now figure out how much data is in the buffer
+ int bytesInBuffer = buffer.length - bufferPosition;
+ // figure out how many bytes the user wants
+ int bytesToCopy = Math.min(len, bytesInBuffer);
+ // copy them in
+ System.arraycopy(buffer, bufferPosition, dest, off, bytesToCopy);
+ // move the buffer position
+ bufferPosition += bytesToCopy;
+ if (bufferPosition >= buffer.length) {
+ // TODO: support buffer reuse in mark/reset
+ buffer = null;
+ bufferPosition = 0;
+ }
+ // position in the blob
+ absolutePosition += bytesToCopy;
+ // increment offset
+ off += bytesToCopy;
+ // decrement the length
+ len -= bytesToCopy;
+ bytesCopied = bytesToCopy;
+ }
+
+ if (len > 0) {
+ int nextBufferSize = getNextBufferSize(len);
+ // We are going to read data past the existing buffer, so we release the memory
+ // before making a DB call
+ buffer = null;
+ bufferPosition = 0;
+ int bytesRead;
+ try {
+ if (len >= nextBufferSize) {
+ // Read directly into the user's buffer
+ bytesRead = lo.read(dest, off, len);
+ } else {
+ // Refill the buffer and copy from it
+ buffer = lo.read(nextBufferSize);
+ // Note that actual number of bytes read may be less than requested
+ bytesRead = Math.min(len, buffer.length);
+ System.arraycopy(buffer, 0, dest, off, bytesRead);
+ // If we are at the end of the stream and we just copied the last bytes,
+ // we can release the buffer
+ if (bytesRead == buffer.length) {
+ // TODO: if we want to reuse the buffer in mark/reset we should not release the
+ // buffer here
+ buffer = null;
+ bufferPosition = 0;
+ } else {
+ bufferPosition = bytesRead;
+ }
+ }
+ } catch (SQLException ex) {
+ throw new IOException(
+ GT.tr("Can not read data from large object {0}, position: {1}, buffer size: {2}",
+ lo.getLongOID(), absolutePosition, len),
+ ex);
+ }
+ bytesCopied += bytesRead;
+ absolutePosition += bytesRead;
+ }
+ return bytesCopied == 0 ? -1 : bytesCopied;
+ }
+ }
+
+ /**
+ * Closes this input stream and releases any system resources associated with the stream.
+ *
+ * <p>The {@code close} method of {@code InputStream} does nothing.</p>
+ *
+ * @throws IOException if an I/O error occurs.
+ */
+ @Override
+ public void close() throws IOException {
+ long loId = 0;
+ try (ResourceLock ignore = lock.obtain()) {
+ LargeObject lo = this.lo;
+ if (lo != null) {
+ loId = lo.getLongOID();
+ lo.close();
+ }
+ this.lo = null;
+ } catch (SQLException e) {
+ throw new IOException(
+ GT.tr("Can not close large object {0}",
+ loId),
+ e);
+ }
+ }
+
+ /**
+ * Marks the current position in this input stream. A subsequent call to the reset
+ * method repositions this stream at the last marked position so that subsequent reads re-read the
+ * same bytes.
+ *
+ * <p>The {@code readlimit} argument tells this input stream to allow that many bytes to be
+ * read before the mark position becomes invalid.</p>
+ *
+ * <p>The general contract of {@code mark} is that, if the method {@code markSupported}
+ * returns {@code true}, the stream somehow remembers all the bytes read after the call to
+ * {@code mark} and stands ready to supply those same bytes again if and whenever the method
+ * {@code reset} is called. However, the stream is not required to remember any data at all
+ * if more than {@code readlimit} bytes are read from the stream before {@code reset} is
+ * called.</p>
+ *
+ * Marking a closed stream should not have any effect on the stream.
+ *
+ * @param readlimit the maximum limit of bytes that can be read before the mark position becomes
+ * invalid.
+ * @see java.io.InputStream#reset()
+ */
+ @Override
+ public void mark(int readlimit) {
+ try (ResourceLock ignore = lock.obtain()) {
+ markPosition = absolutePosition;
+ }
+ }
+
+ /**
+ * Repositions this stream to the position at the time the {@code mark} method was last
+ * called on this input stream. NB: if {@code mark} has not been called, the stream is
+ * repositioned to its beginning.
+ *
+ * @see java.io.InputStream#mark(int)
+ * @see java.io.IOException
+ */
+ @Override
+ public void reset() throws IOException {
+ try (ResourceLock ignore = lock.obtain()) {
+ LargeObject lo = getLo();
+ long loId = lo.getLongOID();
+ try {
+ if (markPosition <= Integer.MAX_VALUE) {
+ lo.seek((int) markPosition);
+ } else {
+ lo.seek64(markPosition, LargeObject.SEEK_SET);
+ }
+ buffer = null;
+ absolutePosition = markPosition;
+ } catch (SQLException e) {
+ throw new IOException(
+ GT.tr("Can not reset stream for large object {0} to position {1}",
+ loId, markPosition),
+ e);
+ }
+ }
+ }
+
+ /**
+ * Tests if this input stream supports the {@code mark} and {@code reset} methods. While the
+ * default {@code markSupported} of {@code InputStream} returns {@code false}, this
+ * implementation returns {@code true}.
+ *
+ * @return {@code true} if this stream instance supports the mark and reset methods;
+ * {@code false} otherwise.
+ * @see java.io.InputStream#mark(int)
+ * @see java.io.InputStream#reset()
+ */
+ @Override
+ public boolean markSupported() {
+ return true;
+ }
+
+ private LargeObject getLo() throws IOException {
+ if (lo == null) {
+ throw new IOException("BlobOutputStream is closed");
+ }
+ return lo;
+ }
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/largeobject/BlobOutputStream.java b/pgjdbc/src/main/java/org/postgresql/largeobject/BlobOutputStream.java
new file mode 100644
index 0000000..2636ee4
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/largeobject/BlobOutputStream.java
@@ -0,0 +1,246 @@
+/*
+ * Copyright (c) 2003, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.largeobject;
+
+import org.postgresql.jdbc.ResourceLock;
+import org.postgresql.util.ByteStreamWriter;
+import org.postgresql.util.GT;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.sql.SQLException;
+
+/**
+ * This implements a basic output stream that writes to a LargeObject.
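+ *
+ * <p>A usage sketch (assumes an open LargeObject {@code lo} and a byte array {@code data};
+ * error handling omitted):</p>
+ * <pre>
+ * try (OutputStream os = lo.getOutputStream()) {
+ *   os.write(data);
+ * }
+ * </pre>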
+ */
+@SuppressWarnings("try")
+public class BlobOutputStream extends OutputStream {
+ static final int DEFAULT_MAX_BUFFER_SIZE = 512 * 1024;
+
+ /**
+ * The parent LargeObject.
+ */
+ private LargeObject lo;
+ private final ResourceLock lock = new ResourceLock();
+
+ /**
+ * Buffer.
+ */
+ private byte[] buf;
+
+ /**
+ * Maximum size of the buffer (default 512 KiB).
+ */
+ private final int maxBufferSize;
+
+ /**
+ * Position within the buffer.
+ */
+ private int bufferPosition;
+
+ /**
+ * Create an OutputStream to a large object.
+ *
+ * @param lo LargeObject
+ */
+ public BlobOutputStream(LargeObject lo) {
+ this(lo, DEFAULT_MAX_BUFFER_SIZE);
+ }
+
+ /**
+ * Create an OutputStream to a large object.
+ *
+ * @param lo LargeObject
+ * @param bufferSize The size of the buffer for single-byte writes
+ */
+ public BlobOutputStream(LargeObject lo, int bufferSize) {
+ this.lo = lo;
+ // Avoid "0" buffer size, and ensure the bufferSize will always be a power of two
+ this.maxBufferSize = Integer.highestOneBit(Math.max(bufferSize, 1));
+ }
+
+ /**
+ * Grows an internal buffer to ensure the extra bytes fit in the buffer.
+ * @param extraBytes the number of extra bytes that should fit in the buffer
+ * @return new buffer
+ */
+ private byte[] growBuffer(int extraBytes) {
+ byte[] buf = this.buf;
+ if (buf != null && (buf.length == maxBufferSize || buf.length - bufferPosition >= extraBytes)) {
+ // Buffer is already large enough
+ return buf;
+ }
+ // We use power-of-two buffers, so they align nicely with PostgreSQL's LargeObject slicing
+ // By default PostgreSQL slices the data in 2KiB chunks
+ int newSize = Math.min(maxBufferSize, Integer.highestOneBit(bufferPosition + extraBytes) * 2);
+ byte[] newBuffer = new byte[newSize];
+ if (buf != null && bufferPosition != 0) {
+ // There was some data in the old buffer, copy it over
+ System.arraycopy(buf, 0, newBuffer, 0, bufferPosition);
+ }
+ this.buf = newBuffer;
+ return newBuffer;
+ }
+
+ @Override
+ public void write(int b) throws IOException {
+ long loId = 0;
+ try (ResourceLock ignore = lock.obtain()) {
+ LargeObject lo = checkClosed();
+ loId = lo.getLongOID();
+ byte[] buf = growBuffer(16);
+ if (bufferPosition >= buf.length) {
+ lo.write(buf);
+ bufferPosition = 0;
+ }
+ buf[bufferPosition++] = (byte) b;
+ } catch (SQLException e) {
+ throw new IOException(
+ GT.tr("Can not write data to large object {0}, requested write length: {1}",
+ loId, 1),
+ e);
+ }
+ }
+
+ @Override
+ public void write(byte[] b, int off, int len) throws IOException {
+ long loId = 0;
+ try (ResourceLock ignore = lock.obtain()) {
+ LargeObject lo = checkClosed();
+ loId = lo.getLongOID();
+ byte[] buf = this.buf;
+ int totalData = bufferPosition + len;
+ // We have two parts of the data (it goes sequentially):
+ // 1) Data in buf at positions [0, bufferPosition)
+ // 2) Data in b at positions [off, off + len)
+ // If the new data fits into the buffer, we just copy it there.
+ // Otherwise, it might sound like a nice idea to just write the data to the database;
+ // unfortunately, that is not optimal, as PostgreSQL chunks LargeObjects into 2KiB rows.
+ // That is why we want to avoid writing a partial 2KiB chunk and then issuing an overwrite
+ // that causes the DB to load and update the row.
+ //
+ // In fact, LOBLKSIZE is BLCKSZ/4, so installations might use different values; we use
+ // 8KiB write alignment for larger buffer sizes just in case.
+ //
+ // | buf[0] ... buf[bufferPosition] | b[off] ... b[off + len] |
+ // |<----------------- totalData ---------------------------->|
+ // If the total data does not align with 2048, we might have some remainder that we will
+ // copy to the beginning of the buffer and write later.
+ // The remainder can fall into either b (e.g. if the requested len is big enough):
+ //
+ // | buf[0] ... buf[bufferPosition] | b[off] ........ b[off + len] |
+ // |<----------------- totalData --------------------------------->|
+ // |<-------writeFromBuf----------->|<-writeFromB->|<--tailLength->|
+ //
+ // or
+ // buf (e.g. if the requested write len is small yet it does not fit into the max buffer size):
+ // | buf[0] .................... buf[bufferPosition] | b[off] .. b[off + len] |
+ // |<----------------- totalData -------------------------------------------->|
+ // |<-------writeFromBuf---------------->|<--------tailLength---------------->|
+ // "writeFromB" will be zero in that case
+
+ // We want aligned writes, so the write requests chunk nicely into large object rows
+ int tailLength =
+ maxBufferSize >= 8192 ? totalData % 8192 : (
+ maxBufferSize >= 2048 ? totalData % 2048 : 0
+ );
+
+ if (totalData >= maxBufferSize) {
+ // The resulting data won't fit into the buffer, so we flush the data to the database
+ int writeFromBuffer = Math.min(bufferPosition, totalData - tailLength);
+ int writeFromB = Math.max(0, totalData - writeFromBuffer - tailLength);
+ if (buf == null || bufferPosition <= 0) {
+ // The buffer is empty, so we can write the data directly
+ lo.write(b, off, writeFromB);
+ } else {
+ if (writeFromB == 0) {
+ lo.write(buf, 0, writeFromBuffer);
+ } else {
+ lo.write(
+ ByteStreamWriter.of(
+ ByteBuffer.wrap(buf, 0, writeFromBuffer),
+ ByteBuffer.wrap(b, off, writeFromB)));
+ }
+ // There might be some data left in the buffer since we keep the tail
+ if (writeFromBuffer >= bufferPosition) {
+ // The buffer was fully written to the database
+ bufferPosition = 0;
+ } else {
+ // Copy the rest to the beginning
+ System.arraycopy(buf, writeFromBuffer, buf, 0, bufferPosition - writeFromBuffer);
+ bufferPosition -= writeFromBuffer;
+ }
+ }
+ len -= writeFromB;
+ off += writeFromB;
+ }
+ if (len > 0) {
+ buf = growBuffer(len);
+ System.arraycopy(b, off, buf, bufferPosition, len);
+ bufferPosition += len;
+ }
+ } catch (SQLException e) {
+ throw new IOException(
+ GT.tr("Can not write data to large object {0}, requested write length: {1}",
+ loId, len),
+ e);
+ }
+ }
+
+ /**
+ * Flushes this output stream and forces any buffered output bytes to be written out. The general
+ * contract of {@code flush} is that calling it is an indication that, if any bytes
+ * previously written have been buffered by the implementation of the output stream, such bytes
+ * should immediately be written to their intended destination.
+ *
+ * @throws IOException if an I/O error occurs.
+ */
+ @Override
+ public void flush() throws IOException {
+ long loId = 0;
+ try (ResourceLock ignore = lock.obtain()) {
+ LargeObject lo = checkClosed();
+ loId = lo.getLongOID();
+ byte[] buf = this.buf;
+ if (buf != null && bufferPosition > 0) {
+ lo.write(buf, 0, bufferPosition);
+ }
+ bufferPosition = 0;
+ } catch (SQLException e) {
+ throw new IOException(
+ GT.tr("Can not flush large object {0}",
+ loId),
+ e);
+ }
+ }
+
+ @Override
+ public void close() throws IOException {
+ long loId = 0;
+ try (ResourceLock ignore = lock.obtain()) {
+ LargeObject lo = this.lo;
+ if (lo != null) {
+ loId = lo.getLongOID();
+ flush();
+ lo.close();
+ this.lo = null;
+ }
+ } catch (SQLException e) {
+ throw new IOException(
+ GT.tr("Can not close large object {0}",
+ loId),
+ e);
+ }
+ }
+
+ private LargeObject checkClosed() throws IOException {
+ if (lo == null) {
+ throw new IOException("BlobOutputStream is closed");
+ }
+ return lo;
+ }
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/largeobject/LargeObject.java b/pgjdbc/src/main/java/org/postgresql/largeobject/LargeObject.java
new file mode 100644
index 0000000..f56812e
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/largeobject/LargeObject.java
@@ -0,0 +1,445 @@
+/*
+ * Copyright (c) 2003, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.largeobject;
+
+import org.postgresql.core.BaseConnection;
+import org.postgresql.fastpath.Fastpath;
+import org.postgresql.fastpath.FastpathArg;
+import org.postgresql.util.ByteStreamWriter;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.sql.SQLException;
+
+/**
+ * This class provides the basic methods required to run the interface, plus a pair of methods that
+ * provide InputStream and OutputStream classes for this object.
+ *
+ * Normally, client code would use the getAsciiStream, getBinaryStream, or getUnicodeStream methods
+ * in ResultSet, or setAsciiStream, setBinaryStream, or setUnicodeStream methods in
+ * PreparedStatement to access Large Objects.
+ *
+ * However, sometimes lower-level access to Large Objects is required that is not supported by
+ * the JDBC specification.
+ *
+ * Refer to org.postgresql.largeobject.LargeObjectManager on how to gain access to a Large Object,
+ * or how to create one.
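+ *
+ * <p>A minimal read sketch (assumes {@code conn} has auto-commit disabled and {@code oid}
+ * refers to an existing large object; error handling omitted):</p>
+ * <pre>
+ * conn.setAutoCommit(false);
+ * LargeObjectManager lom = conn.unwrap(org.postgresql.PGConnection.class).getLargeObjectAPI();
+ * try (LargeObject obj = lom.open(oid, LargeObjectManager.READ)) {
+ *   byte[] data = obj.read(obj.size());
+ * }
+ * </pre>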
+ *
+ * @see org.postgresql.largeobject.LargeObjectManager
+ * @see java.sql.ResultSet#getAsciiStream
+ * @see java.sql.ResultSet#getBinaryStream
+ * @see java.sql.ResultSet#getUnicodeStream
+ * @see java.sql.PreparedStatement#setAsciiStream
+ * @see java.sql.PreparedStatement#setBinaryStream
+ * @see java.sql.PreparedStatement#setUnicodeStream
+ */
+@SuppressWarnings("deprecation") // support for deprecated Fastpath API
+public class LargeObject
+ implements AutoCloseable {
+
+ /**
+ * Indicates a seek from the beginning of a file.
+ */
+ public static final int SEEK_SET = 0;
+
+ /**
+ * Indicates a seek from the current position.
+ */
+ public static final int SEEK_CUR = 1;
+
+ /**
+ * Indicates a seek from the end of a file.
+ */
+ public static final int SEEK_END = 2;
+
+ private static final byte[] EMPTY_BYTE_ARRAY = new byte[0];
+
+ private final Fastpath fp; // Fastpath API to use
+ private final long oid; // OID of this object
+ private final int mode; // read/write mode of this object
+ private final int fd; // the descriptor of the open large object
+
+ private BlobOutputStream os; // The current output stream
+
+ private boolean closed; // true when we are closed
+
+ private BaseConnection conn; // Only initialized when open a LOB with CommitOnClose
+ private final boolean commitOnClose; // Only initialized when open a LOB with CommitOnClose
+
+ /**
+ * This opens a large object.
+ *
+ * If the object does not exist, then an SQLException is thrown.
+ *
+ * @param fp FastPath API for the connection to use
+ * @param oid of the Large Object to open
+ * @param mode Mode of opening the large object
+ * @param conn the connection to the database used to access this LOB
+ * @param commitOnClose commit the transaction when this LOB will be closed (defined in
+ * LargeObjectManager)
+ * @throws SQLException if a database-access error occurs.
+ * @see org.postgresql.largeobject.LargeObjectManager
+ */
+ protected LargeObject(Fastpath fp, long oid, int mode,
+ BaseConnection conn, boolean commitOnClose)
+ throws SQLException {
+ this.fp = fp;
+ this.oid = oid;
+ this.mode = mode;
+ if (commitOnClose) {
+ this.commitOnClose = true;
+ this.conn = conn;
+ } else {
+ this.commitOnClose = false;
+ }
+
+ FastpathArg[] args = new FastpathArg[2];
+ args[0] = Fastpath.createOIDArg(oid);
+ args[1] = new FastpathArg(mode);
+ this.fd = fp.getInteger("lo_open", args);
+ }
+
+ /**
+ * This opens a large object.
+ *
+ * If the object does not exist, then an SQLException is thrown.
+ *
+ * @param fp FastPath API for the connection to use
+ * @param oid of the Large Object to open
+ * @param mode Mode of opening the large object (defined in LargeObjectManager)
+ * @throws SQLException if a database-access error occurs.
+ * @see org.postgresql.largeobject.LargeObjectManager
+ */
+ protected LargeObject(Fastpath fp, long oid, int mode) throws SQLException {
+ this(fp, oid, mode, null, false);
+ }
+
+ public LargeObject copy() throws SQLException {
+ return new LargeObject(fp, oid, mode);
+ }
+
+ /*
+ * Release large object resources during garbage cleanup.
+ *
+ * This code used to call close() however that was problematic because the scope of the fd is a
+ * transaction, thus if commit or rollback was called before garbage collection ran then the call
+ * to close would error out with an invalid large object handle. So this method now does nothing
+ * and lets the server handle cleanup when it ends the transaction.
+ *
+ * protected void finalize() throws SQLException { }
+ */
+
+ /**
+ * @return the OID of this LargeObject
+ * @deprecated As of 8.3, replaced by {@link #getLongOID()}
+ */
+ @Deprecated
+ public int getOID() {
+ return (int) oid;
+ }
+
+ /**
+ * @return the OID of this LargeObject
+ */
+ public long getLongOID() {
+ return oid;
+ }
+
+ /**
+ * This method closes the object. You must not call methods in this object after this is called.
+ *
+ * @throws SQLException if a database-access error occurs.
+ */
+ @Override
+ public void close() throws SQLException {
+ if (!closed) {
+ // flush any open output streams
+ if (os != null) {
+ try {
+ // we can't call os.close() otherwise we go into an infinite loop!
+ os.flush();
+ } catch (IOException ioe) {
+ throw new PSQLException("Exception flushing output stream", PSQLState.DATA_ERROR, ioe);
+ } finally {
+ os = null;
+ }
+ }
+
+ // finally close
+ FastpathArg[] args = new FastpathArg[1];
+ args[0] = new FastpathArg(fd);
+ fp.fastpath("lo_close", args); // true here as we dont care!!
+ closed = true;
+ BaseConnection conn = this.conn;
+ if (this.commitOnClose && conn != null) {
+ conn.commit();
+ }
+ }
+ }
+
+ /**
+ * Reads some data from the object, and return as a byte[] array.
+ *
+ * @param len number of bytes to read
+ * @return byte[] array containing data read
+ * @throws SQLException if a database-access error occurs.
+ */
+ public byte[] read(int len) throws SQLException {
+ // This is the original method, where the entire block (len bytes)
+ // is retrieved in one go.
+ FastpathArg[] args = new FastpathArg[2];
+ args[0] = new FastpathArg(fd);
+ args[1] = new FastpathArg(len);
+ byte[] bytes = fp.getData("loread", args);
+ if (bytes == null) {
+ return EMPTY_BYTE_ARRAY;
+ }
+ return bytes;
+ }
+
+ /**
+ * Reads some data from the object into an existing array.
+ *
+ * @param buf destination array
+ * @param off offset within array
+ * @param len number of bytes to read
+ * @return the number of bytes actually read
+ * @throws SQLException if a database-access error occurs.
+ */
+ public int read(byte[] buf, int off, int len) throws SQLException {
+ byte[] b = read(len);
+ if (b.length == 0) {
+ return 0;
+ }
+ len = Math.min(len, b.length);
+ System.arraycopy(b, 0, buf, off, len);
+ return len;
+ }
+
+ /**
+ * Writes an array to the object.
+ *
+ * @param buf array to write
+ * @throws SQLException if a database-access error occurs.
+ */
+ public void write(byte[] buf) throws SQLException {
+ FastpathArg[] args = new FastpathArg[2];
+ args[0] = new FastpathArg(fd);
+ args[1] = new FastpathArg(buf);
+ fp.fastpath("lowrite", args);
+ }
+
+ /**
+ * Writes some data from an array to the object.
+ *
+ * @param buf destination array
+ * @param off offset within array
+ * @param len number of bytes to write
+ * @throws SQLException if a database-access error occurs.
+ */
+ public void write(byte[] buf, int off, int len) throws SQLException {
+ FastpathArg[] args = new FastpathArg[2];
+ args[0] = new FastpathArg(fd);
+ args[1] = new FastpathArg(buf, off, len);
+ fp.fastpath("lowrite", args);
+ }
+
+ /**
+ * Writes some data from a given writer to the object.
+ *
+ * @param writer the source of the data to write
+ * @throws SQLException if a database-access error occurs.
+ */
+ public void write(ByteStreamWriter writer) throws SQLException {
+ FastpathArg[] args = new FastpathArg[2];
+ args[0] = new FastpathArg(fd);
+ args[1] = FastpathArg.of(writer);
+ fp.fastpath("lowrite", args);
+ }
+
+ /**
+ * Sets the current position within the object.
+ *
+ * This is similar to the fseek() call in the standard C library. It allows you to have random
+ * access to the large object.
+ *
+ * @param pos position within object
+ * @param ref Either SEEK_SET, SEEK_CUR or SEEK_END
+ * @throws SQLException if a database-access error occurs.
+ */
+ public void seek(int pos, int ref) throws SQLException {
+ FastpathArg[] args = new FastpathArg[3];
+ args[0] = new FastpathArg(fd);
+ args[1] = new FastpathArg(pos);
+ args[2] = new FastpathArg(ref);
+ fp.fastpath("lo_lseek", args);
+ }
+
+ /**
+ * Sets the current position within the object using 64-bit value (9.3+).
+ *
+ * @param pos position within object
+ * @param ref Either SEEK_SET, SEEK_CUR or SEEK_END
+ * @throws SQLException if a database-access error occurs.
+ */
+ public void seek64(long pos, int ref) throws SQLException {
+ FastpathArg[] args = new FastpathArg[3];
+ args[0] = new FastpathArg(fd);
+ args[1] = new FastpathArg(pos);
+ args[2] = new FastpathArg(ref);
+ fp.fastpath("lo_lseek64", args);
+ }
+
+ /**
+ * Sets the current position within the object.
+ *
+ * This is similar to the fseek() call in the standard C library. It allows you to have random
+ * access to the large object.
+ *
+ * @param pos position within object from beginning
+ * @throws SQLException if a database-access error occurs.
+ */
+ public void seek(int pos) throws SQLException {
+ seek(pos, SEEK_SET);
+ }
+
+ /**
+ * @return the current position within the object
+ * @throws SQLException if a database-access error occurs.
+ */
+ public int tell() throws SQLException {
+ FastpathArg[] args = new FastpathArg[1];
+ args[0] = new FastpathArg(fd);
+ return fp.getInteger("lo_tell", args);
+ }
+
+ /**
+ * @return the current position within the object
+ * @throws SQLException if a database-access error occurs.
+ */
+ public long tell64() throws SQLException {
+ FastpathArg[] args = new FastpathArg[1];
+ args[0] = new FastpathArg(fd);
+ return fp.getLong("lo_tell64", args);
+ }
+
+ /**
+ * This method is inefficient, as the only way to find out the size of the object is to seek to
+ * the end, record the current position, then return to the original position.
+ *
+ * A better method will be found in the future.
+ *
+ * @return the size of the large object
+ * @throws SQLException if a database-access error occurs.
+ */
+ public int size() throws SQLException {
+ int cp = tell();
+ seek(0, SEEK_END);
+ int sz = tell();
+ seek(cp, SEEK_SET);
+ return sz;
+ }
+
+ /**
+ * See #size() for information about efficiency.
+ *
+ * @return the size of the large object
+ * @throws SQLException if a database-access error occurs.
+ */
+ public long size64() throws SQLException {
+ long cp = tell64();
+ seek64(0, SEEK_END);
+ long sz = tell64();
+ seek64(cp, SEEK_SET);
+ return sz;
+ }
+
+ /**
+ * Truncates the large object to the given length in bytes. If the number of bytes is larger than
+ * the current large object length, the large object will be filled with zero bytes. This method
+ * does not modify the current file offset.
+ *
+ * @param len given length in bytes
+ * @throws SQLException if something goes wrong
+ */
+ public void truncate(int len) throws SQLException {
+ FastpathArg[] args = new FastpathArg[2];
+ args[0] = new FastpathArg(fd);
+ args[1] = new FastpathArg(len);
+ fp.getInteger("lo_truncate", args);
+ }
+
+ /**
+ * Truncates the large object to the given length in bytes. If the number of bytes is larger than
+ * the current large object length, the large object will be filled with zero bytes. This method
+ * does not modify the current file offset.
+ *
+ * @param len given length in bytes
+ * @throws SQLException if something goes wrong
+ */
+ public void truncate64(long len) throws SQLException {
+ FastpathArg[] args = new FastpathArg[2];
+ args[0] = new FastpathArg(fd);
+ args[1] = new FastpathArg(len);
+ fp.getInteger("lo_truncate64", args);
+ }
+
+ /**
+ * Returns an {@link InputStream} from this object.
+ *
+ * This {@link InputStream} can then be used in any method that requires an InputStream.
+ *
+ * @return {@link InputStream} from this object
+ * @throws SQLException if a database-access error occurs.
+ */
+ public InputStream getInputStream() throws SQLException {
+ return new BlobInputStream(this);
+ }
+
+ /**
+ * Returns an {@link InputStream} from this object, that will limit the amount of data that is
+ * visible.
+ *
+ * @param limit maximum number of bytes the resulting stream will serve
+ * @return {@link InputStream} from this object
+ * @throws SQLException if a database-access error occurs.
+ */
+ public InputStream getInputStream(long limit) throws SQLException {
+ return new BlobInputStream(this, BlobInputStream.DEFAULT_MAX_BUFFER_SIZE, limit);
+ }
+
+ /**
+ * Returns an {@link InputStream} from this object, that will limit the amount of data that is
+ * visible.
+ * Added mostly for testing
+ *
+ * @param bufferSize buffer size for the stream
+ * @param limit maximum number of bytes the resulting stream will serve
+ * @return {@link InputStream} from this object
+ * @throws SQLException if a database-access error occurs.
+ */
+ public InputStream getInputStream(int bufferSize, long limit) throws SQLException {
+ return new BlobInputStream(this, bufferSize, limit);
+ }
+
+ /**
+ * Returns an {@link OutputStream} to this object.
+ *
+ * This OutputStream can then be used in any method that requires an OutputStream.
+ *
+ * @return {@link OutputStream} from this object
+ * @throws SQLException if a database-access error occurs.
+ */
+ public OutputStream getOutputStream() throws SQLException {
+ if (os == null) {
+ os = new BlobOutputStream(this);
+ }
+ return os;
+ }
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/largeobject/LargeObjectManager.java b/pgjdbc/src/main/java/org/postgresql/largeobject/LargeObjectManager.java
new file mode 100644
index 0000000..12efe14
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/largeobject/LargeObjectManager.java
@@ -0,0 +1,356 @@
+/*
+ * Copyright (c) 2003, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.largeobject;
+
+import org.postgresql.core.BaseConnection;
+import org.postgresql.fastpath.Fastpath;
+import org.postgresql.fastpath.FastpathArg;
+import org.postgresql.util.GT;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.logging.Level;
+
+/**
+ * This class implements the large object interface to org.postgresql.
+ *
+ * It provides methods that allow client code to create, open and delete large objects from the
+ * database. When opening an object, an instance of org.postgresql.largeobject.LargeObject is
+ * returned, and its methods then allow access to the object.
+ *
+ * This class can only be created by {@link BaseConnection}
+ *
+ * To get access to this class, use the following segment of code:
+ *
+ * <pre>
+ * import org.postgresql.largeobject.*;
+ *
+ * Connection conn;
+ * LargeObjectManager lobj;
+ *
+ * ... code that opens a connection ...
+ *
+ * lobj = ((org.postgresql.PGConnection)myconn).getLargeObjectAPI();
+ * </pre>
+ *
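+ * <p>A create-and-write sketch (assumes auto-commit is disabled on the connection; error
+ * handling omitted):</p>
+ * <pre>
+ * long oid = lobj.createLO(LargeObjectManager.READWRITE);
+ * try (LargeObject obj = lobj.open(oid, LargeObjectManager.WRITE)) {
+ *   obj.write("hello".getBytes(java.nio.charset.StandardCharsets.UTF_8));
+ * }
+ * </pre>
+ *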
+ * Normally, client code would use the getAsciiStream, getBinaryStream, or getUnicodeStream methods
+ * in ResultSet, or setAsciiStream, setBinaryStream, or setUnicodeStream methods in
+ * PreparedStatement to access Large Objects.
+ *
+ * However, sometimes lower-level access to Large Objects is required that is not supported by
+ * the JDBC specification.
+ *
+ * Refer to org.postgresql.largeobject.LargeObject on how to manipulate the contents of a Large
+ * Object.
+ *
+ * @see java.sql.ResultSet#getAsciiStream
+ * @see java.sql.ResultSet#getBinaryStream
+ * @see java.sql.ResultSet#getUnicodeStream
+ * @see java.sql.PreparedStatement#setAsciiStream
+ * @see java.sql.PreparedStatement#setBinaryStream
+ * @see java.sql.PreparedStatement#setUnicodeStream
+ */
+@SuppressWarnings("deprecation") // support for deprecated Fastpath API
+public class LargeObjectManager {
+ // the fastpath api for this connection
+ private Fastpath fp;
+ private BaseConnection conn;
+
+ /**
+ * This mode indicates we want to write to an object.
+ */
+ public static final int WRITE = 0x00020000;
+
+ /**
+ * This mode indicates we want to read an object.
+ */
+ public static final int READ = 0x00040000;
+
+ /**
+ * This mode is the default. It indicates we want read and write access to a large object.
+ */
+ public static final int READWRITE = READ | WRITE;
+
+ /**
+ * Constructs the LargeObject API.
+ *
+ * Important Notice
+ * This method should only be called by {@link BaseConnection}
+ *
+ * There should only be one LargeObjectManager per Connection. The {@link BaseConnection} class
+ * keeps track of the various extension APIs, and it is advised you use those to gain access
+ * rather than instantiating this class directly.
+ *
+ * @param conn connection
+ * @throws SQLException if something wrong happens
+ */
+ public LargeObjectManager(BaseConnection conn) throws SQLException {
+ this.conn = conn;
+ // We need Fastpath to do anything
+ this.fp = conn.getFastpathAPI();
+
+ // Now get the function oid's for the api
+ //
+ // This is an example of Fastpath.addFunctions();
+ //
+ String sql;
+ if (conn.getMetaData().supportsSchemasInTableDefinitions()) {
+ sql = "SELECT p.proname,p.oid "
+ + " FROM pg_catalog.pg_proc p, pg_catalog.pg_namespace n "
+ + " WHERE p.pronamespace=n.oid AND n.nspname='pg_catalog' AND (";
+ } else {
+ sql = "SELECT proname,oid FROM pg_proc WHERE ";
+ }
+ sql += " proname = 'lo_open'"
+ + " or proname = 'lo_close'"
+ + " or proname = 'lo_creat'"
+ + " or proname = 'lo_unlink'"
+ + " or proname = 'lo_lseek'"
+ + " or proname = 'lo_lseek64'"
+ + " or proname = 'lo_tell'"
+ + " or proname = 'lo_tell64'"
+ + " or proname = 'loread'"
+ + " or proname = 'lowrite'"
+ + " or proname = 'lo_truncate'"
+ + " or proname = 'lo_truncate64'";
+
+ if (conn.getMetaData().supportsSchemasInTableDefinitions()) {
+ sql += ")";
+ }
+
+ Statement stmt = conn.createStatement();
+ ResultSet res = stmt.executeQuery(sql);
+
+ fp.addFunctions(res);
+ res.close();
+ stmt.close();
+
+ conn.getLogger().log(Level.FINE, "Large Object initialised");
+ }
+
+ /**
+ * This opens an existing large object, based on its OID. This method assumes that READ and WRITE
+ * access is required (the default).
+ *
+ * @param oid of large object
+ * @return LargeObject instance providing access to the object
+ * @throws SQLException on error
+ * @deprecated As of 8.3, replaced by {@link #open(long)}
+ */
+ @Deprecated
+ public LargeObject open(int oid) throws SQLException {
+ return open((long) oid, false);
+ }
+
+ /**
+ * This opens an existing large object, same as previous method, but commits the transaction on
+ * close if asked. This is useful when the LOB is returned to a caller which won't take care of
+ * transactions by itself.
+ *
+ * @param oid of large object
+ * @param commitOnClose commit the transaction when this LOB will be closed
+ * @return LargeObject instance providing access to the object
+ * @throws SQLException on error
+ */
+ public LargeObject open(int oid, boolean commitOnClose) throws SQLException {
+ return open((long) oid, commitOnClose);
+ }
+
+ /**
+ * This opens an existing large object, based on its OID. This method assumes that READ and WRITE
+ * access is required (the default).
+ *
+ * @param oid of large object
+ * @return LargeObject instance providing access to the object
+ * @throws SQLException on error
+ */
+ public LargeObject open(long oid) throws SQLException {
+ return open(oid, READWRITE, false);
+ }
+
+ /**
+ * This opens an existing large object, same as previous method, but commits the transaction on
+ * close if asked.
+ *
+ * @param oid of large object
+ * @param commitOnClose commit the transaction when this LOB will be closed
+ * @return LargeObject instance providing access to the object
+ * @throws SQLException on error
+ */
+ public LargeObject open(long oid, boolean commitOnClose) throws SQLException {
+ return open(oid, READWRITE, commitOnClose);
+ }
+
+ /**
+ * This opens an existing large object, based on its OID.
+ *
+ * @param oid of large object
+ * @param mode mode of open
+ * @return LargeObject instance providing access to the object
+ * @throws SQLException on error
+ * @deprecated As of 8.3, replaced by {@link #open(long, int)}
+ */
+ @Deprecated
+ public LargeObject open(int oid, int mode) throws SQLException {
+ return open((long) oid, mode, false);
+ }
+
+ /**
+ * This opens an existing large object, same as previous method, but commits the transaction on
+ * close if asked.
+ *
+ * @param oid of large object
+ * @param mode mode of open
+ * @param commitOnClose commit the transaction when this LOB will be closed
+ * @return LargeObject instance providing access to the object
+ * @throws SQLException on error
+ */
+ public LargeObject open(int oid, int mode, boolean commitOnClose) throws SQLException {
+ return open((long) oid, mode, commitOnClose);
+ }
+
+ /**
+ * This opens an existing large object, based on its OID.
+ *
+ * @param oid of large object
+ * @param mode mode of open
+ * @return LargeObject instance providing access to the object
+ * @throws SQLException on error
+ */
+ public LargeObject open(long oid, int mode) throws SQLException {
+ return open(oid, mode, false);
+ }
+
+ /**
+ * This opens an existing large object, based on its OID.
+ *
+ * @param oid of large object
+ * @param mode mode of open
+ * @param commitOnClose commit the transaction when this LOB will be closed
+ * @return LargeObject instance providing access to the object
+ * @throws SQLException on error
+ */
+ public LargeObject open(long oid, int mode, boolean commitOnClose) throws SQLException {
+ if (conn.getAutoCommit()) {
+ throw new PSQLException(GT.tr("Large Objects may not be used in auto-commit mode."),
+ PSQLState.NO_ACTIVE_SQL_TRANSACTION);
+ }
+ return new LargeObject(fp, oid, mode, conn, commitOnClose);
+ }
+
+ /**
+ * This creates a large object, returning its OID.
+ *
+ * It defaults to READWRITE for the new object's attributes.
+ *
+ * @return oid of new object
+ * @throws SQLException on error
+ * @deprecated As of 8.3, replaced by {@link #createLO()}
+ */
+ @Deprecated
+ public int create() throws SQLException {
+ return create(READWRITE);
+ }
+
+ /**
+ * This creates a large object, returning its OID.
+ *
+ * It defaults to READWRITE for the new object's attributes.
+ *
+ * @return oid of new object
+ * @throws SQLException on error
+ */
+ public long createLO() throws SQLException {
+ return createLO(READWRITE);
+ }
+
+ /**
+ * This creates a large object, returning its OID.
+ *
+ * @param mode a bitmask describing different attributes of the new object
+ * @return oid of new object
+ * @throws SQLException on error
+ */
+ public long createLO(int mode) throws SQLException {
+ if (conn.getAutoCommit()) {
+ throw new PSQLException(GT.tr("Large Objects may not be used in auto-commit mode."),
+ PSQLState.NO_ACTIVE_SQL_TRANSACTION);
+ }
+ FastpathArg[] args = new FastpathArg[1];
+ args[0] = new FastpathArg(mode);
+ return fp.getOID("lo_creat", args);
+ }
+
+ /**
+ * This creates a large object, returning its OID.
+ *
+ * @param mode a bitmask describing different attributes of the new object
+ * @return oid of new object
+ * @throws SQLException on error
+ * @deprecated As of 8.3, replaced by {@link #createLO(int)}
+ */
+ @Deprecated
+ public int create(int mode) throws SQLException {
+ long oid = createLO(mode);
+ return (int) oid;
+ }
+
+ /**
+ * This deletes a large object.
+ *
+ * @param oid describing object to delete
+ * @throws SQLException on error
+ */
+ public void delete(long oid) throws SQLException {
+ FastpathArg[] args = new FastpathArg[1];
+ args[0] = Fastpath.createOIDArg(oid);
+ fp.fastpath("lo_unlink", args);
+ }
+
+ /**
+ * This deletes a large object.
+ *
+ * It is identical to the delete method, and is supplied as the C API uses unlink.
+ *
+ * @param oid describing object to delete
+ * @throws SQLException on error
+ * @deprecated As of 8.3, replaced by {@link #unlink(long)}
+ */
+ @Deprecated
+ public void unlink(int oid) throws SQLException {
+ delete((long) oid);
+ }
+
+ /**
+ * This deletes a large object.
+ *
+ * It is identical to the delete method, and is supplied as the C API uses unlink.
+ *
+ * @param oid describing object to delete
+ * @throws SQLException on error
+ */
+ public void unlink(long oid) throws SQLException {
+ delete(oid);
+ }
+
+ /**
+ * This deletes a large object.
+ *
+ * @param oid describing object to delete
+ * @throws SQLException on error
+ * @deprecated As of 8.3, replaced by {@link #delete(long)}
+ */
+ @Deprecated
+ public void delete(int oid) throws SQLException {
+ delete((long) oid);
+ }
+}
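For context, a minimal usage sketch of the API above (the method name, payload, and use of unwrap are illustrative, not part of the patch). Large objects require auto-commit to be off, as enforced by open() and createLO():

    import org.postgresql.PGConnection;
    import org.postgresql.largeobject.LargeObject;
    import org.postgresql.largeobject.LargeObjectManager;

    import java.nio.charset.StandardCharsets;
    import java.sql.Connection;
    import java.sql.SQLException;

    public class LargeObjectExample {
      static long writeGreeting(Connection conn) throws SQLException {
        conn.setAutoCommit(false); // large objects must run inside a transaction
        LargeObjectManager lom = conn.unwrap(PGConnection.class).getLargeObjectAPI();
        long oid = lom.createLO(LargeObjectManager.READWRITE); // create a new empty object
        LargeObject lo = lom.open(oid, LargeObjectManager.WRITE);
        try {
          lo.write("hello, large object".getBytes(StandardCharsets.UTF_8));
        } finally {
          lo.close();
        }
        conn.commit();
        return oid;
      }
    }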
diff --git a/pgjdbc/src/main/java/org/postgresql/plugin/AuthenticationPlugin.java b/pgjdbc/src/main/java/org/postgresql/plugin/AuthenticationPlugin.java
new file mode 100644
index 0000000..3734cbe
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/plugin/AuthenticationPlugin.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2021, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.plugin;
+
+import org.postgresql.util.PSQLException;
+
+public interface AuthenticationPlugin {
+
+ /**
+ * Callback method to provide the password to use for authentication.
+ *
+ * Implementers can also check the authentication type to ensure that the
+ * authentication handshake is using a specific authentication method (e.g. SASL)
+ * or avoiding a specific one (e.g. cleartext).
+ *
+ * For security reasons, the driver will wipe the contents of the array returned
+ * by this method after it has been used for authentication.
+ *
+ * Implementers must provide a new array each time this method is invoked as
+ * the previous contents will have been wiped.
+ *
+ * @param type The authentication method that the server is requesting
+ * @return The password to use or null if no password is available
+ * @throws PSQLException if something goes wrong supplying the password
+ */
+ char[] getPassword(AuthenticationRequestType type) throws PSQLException;
+
+}
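A minimal sketch of a plugin implementation (the class name and environment-variable password source are illustrative; the driver's authenticationPluginClassName connection property is the assumed registration mechanism):

    import org.postgresql.plugin.AuthenticationPlugin;
    import org.postgresql.plugin.AuthenticationRequestType;
    import org.postgresql.util.PSQLException;
    import org.postgresql.util.PSQLState;

    public class EnvPasswordPlugin implements AuthenticationPlugin {
      @Override
      public char[] getPassword(AuthenticationRequestType type) throws PSQLException {
        if (type == AuthenticationRequestType.CLEARTEXT_PASSWORD) {
          // example policy: refuse to send the password in cleartext
          throw new PSQLException("cleartext authentication refused",
              PSQLState.CONNECTION_REJECTED);
        }
        String password = System.getenv("PGPASSWORD");
        // return a fresh array each call; the driver wipes it after use
        return password == null ? null : password.toCharArray();
      }
    }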
diff --git a/pgjdbc/src/main/java/org/postgresql/plugin/AuthenticationRequestType.java b/pgjdbc/src/main/java/org/postgresql/plugin/AuthenticationRequestType.java
new file mode 100644
index 0000000..f62bb11
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/plugin/AuthenticationRequestType.java
@@ -0,0 +1,13 @@
+/*
+ * Copyright (c) 2021, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.plugin;
+
+public enum AuthenticationRequestType {
+ CLEARTEXT_PASSWORD,
+ GSS,
+ MD5_PASSWORD,
+ SASL,
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/LogSequenceNumber.java b/pgjdbc/src/main/java/org/postgresql/replication/LogSequenceNumber.java
new file mode 100644
index 0000000..1886a7d
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/replication/LogSequenceNumber.java
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.replication;
+
+import java.nio.ByteBuffer;
+
+/**
+ * LSN (Log Sequence Number) data which is a pointer to a location in the XLOG.
+ */
+public final class LogSequenceNumber implements Comparable<LogSequenceNumber> {
+ /**
+ * Zero is used to indicate an invalid pointer. Bootstrap skips the first possible WAL segment,
+ * initializing the first WAL page at XLOG_SEG_SIZE, so no XLOG record can begin at zero.
+ */
+ public static final LogSequenceNumber INVALID_LSN = LogSequenceNumber.valueOf(0);
+
+ private final long value;
+
+ private LogSequenceNumber(long value) {
+ this.value = value;
+ }
+
+ /**
+ * @param value numeric representation of a position in the write-ahead log stream
+ * @return not null LSN instance
+ */
+ public static LogSequenceNumber valueOf(long value) {
+ return new LogSequenceNumber(value);
+ }
+
+ /**
+ * Create an LSN instance from its string representation.
+ *
+ * @param strValue not null string as two hexadecimal numbers of up to 8 digits each, separated by
+ * a slash. For example {@code 16/3002D50}, {@code 0/15D68C50}
+ * @return not null LSN instance; if the specified string is not in a valid form,
+ * {@link LogSequenceNumber#INVALID_LSN} is returned
+ */
+ public static LogSequenceNumber valueOf(String strValue) {
+ int slashIndex = strValue.lastIndexOf('/');
+
+ if (slashIndex <= 0) {
+ return INVALID_LSN;
+ }
+
+ String logicalXLogStr = strValue.substring(0, slashIndex);
+ int logicalXlog = (int) Long.parseLong(logicalXLogStr, 16);
+ String segmentStr = strValue.substring(slashIndex + 1, strValue.length());
+ int segment = (int) Long.parseLong(segmentStr, 16);
+
+ ByteBuffer buf = ByteBuffer.allocate(8);
+ buf.putInt(logicalXlog);
+ buf.putInt(segment);
+ buf.position(0);
+ long value = buf.getLong();
+
+ return LogSequenceNumber.valueOf(value);
+ }
+
+ /**
+ * @return long value representing the position in the write-ahead log stream
+ */
+ public long asLong() {
+ return value;
+ }
+
+ /**
+ * @return String representing the position in the write-ahead log stream as two hexadecimal numbers of
+ * up to 8 digits each, separated by a slash. For example {@code 16/3002D50}, {@code 0/15D68C50}
+ */
+ public String asString() {
+ ByteBuffer buf = ByteBuffer.allocate(8);
+ buf.putLong(value);
+ buf.position(0);
+
+ int logicalXlog = buf.getInt();
+ int segment = buf.getInt();
+ return String.format("%X/%X", logicalXlog, segment);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+
+ LogSequenceNumber that = (LogSequenceNumber) o;
+
+ return value == that.value;
+
+ }
+
+ @Override
+ public int hashCode() {
+ return (int) (value ^ (value >>> 32));
+ }
+
+ @Override
+ public String toString() {
+ return "LSN{" + asString() + '}';
+ }
+
+ @Override
+ public int compareTo(LogSequenceNumber o) {
+ if (value == o.value) {
+ return 0;
+ }
+ //Unsigned comparison
+ return value + Long.MIN_VALUE < o.value + Long.MIN_VALUE ? -1 : 1;
+ }
+
+}
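A small sketch of the parse/format round trip and the unsigned ordering implemented by compareTo (values are illustrative):

    import org.postgresql.replication.LogSequenceNumber;

    public class LsnDemo {
      public static void main(String[] args) {
        LogSequenceNumber lsn = LogSequenceNumber.valueOf("16/3002D50");
        System.out.println(lsn.asLong());   // 94539623760, i.e. (0x16 << 32) | 0x3002D50
        System.out.println(lsn.asString()); // 16/3002D50
        // the 64-bit value is compared as unsigned, so an LSN with the high bit set
        // still sorts after small positive values
        System.out.println(LogSequenceNumber.valueOf("FFFFFFFF/0")
            .compareTo(LogSequenceNumber.valueOf("0/1")) > 0); // true
      }
    }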
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/PGReplicationConnection.java b/pgjdbc/src/main/java/org/postgresql/replication/PGReplicationConnection.java
new file mode 100644
index 0000000..6148f49
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/replication/PGReplicationConnection.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.replication;
+
+import org.postgresql.PGProperty;
+import org.postgresql.replication.fluent.ChainedCreateReplicationSlotBuilder;
+import org.postgresql.replication.fluent.ChainedStreamBuilder;
+
+import java.sql.SQLException;
+
+/**
+ * API available only if the connection was created with the properties required for replication:
+ * {@link PGProperty#REPLICATION} and {@link PGProperty#ASSUME_MIN_SERVER_VERSION}. Without these
+ * properties, building a replication stream fails with an exception.
+ */
+public interface PGReplicationConnection {
+
+ /**
+ * After a replication stream has been started, this connection is not available for other
+ * queries until the replication stream is closed.
+ *
+ * @return not null fluent API for building a replication stream
+ */
+ ChainedStreamBuilder replicationStream();
+
+ /**
+ * Create a replication slot that can subsequently be used with {@link PGReplicationConnection#replicationStream()}.
+ *
+ * Replication slots provide an automated way to ensure that the master does not remove WAL
+ * segments until they have been received by all standbys, and that the master does not remove
+ * rows which could cause a recovery conflict even when the standby is disconnected.
+ *
+ * @return not null fluent API for creating a replication slot
+ */
+ ChainedCreateReplicationSlotBuilder createReplicationSlot();
+
+ /**
+ * @param slotName not null name of an existing replication slot in the database that should be dropped
+ * @throws SQLException if the replication slot cannot be dropped.
+ */
+ void dropReplicationSlot(String slotName) throws SQLException;
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/PGReplicationConnectionImpl.java b/pgjdbc/src/main/java/org/postgresql/replication/PGReplicationConnectionImpl.java
new file mode 100644
index 0000000..350526e
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/replication/PGReplicationConnectionImpl.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.replication;
+
+import org.postgresql.core.BaseConnection;
+import org.postgresql.replication.fluent.ChainedCreateReplicationSlotBuilder;
+import org.postgresql.replication.fluent.ChainedStreamBuilder;
+import org.postgresql.replication.fluent.ReplicationCreateSlotBuilder;
+import org.postgresql.replication.fluent.ReplicationStreamBuilder;
+
+import java.sql.SQLException;
+import java.sql.Statement;
+
+public class PGReplicationConnectionImpl implements PGReplicationConnection {
+ private final BaseConnection connection;
+
+ public PGReplicationConnectionImpl(BaseConnection connection) {
+ this.connection = connection;
+ }
+
+ @Override
+ public ChainedStreamBuilder replicationStream() {
+ return new ReplicationStreamBuilder(connection);
+ }
+
+ @Override
+ public ChainedCreateReplicationSlotBuilder createReplicationSlot() {
+ return new ReplicationCreateSlotBuilder(connection);
+ }
+
+ @Override
+ public void dropReplicationSlot(String slotName) throws SQLException {
+ if (slotName == null || slotName.isEmpty()) {
+ throw new IllegalArgumentException("Replication slot name can't be null or empty");
+ }
+
+ Statement statement = connection.createStatement();
+ try {
+ statement.execute("DROP_REPLICATION_SLOT " + slotName);
+ } finally {
+ statement.close();
+ }
+ }
+}
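A connection-setup sketch for using this API (URL, user, and slot name are illustrative; replication connections require the properties noted in the interface javadoc above):

    import org.postgresql.PGConnection;

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Properties;

    public class DropSlotExample {
      public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.setProperty("user", "postgres");
        props.setProperty("replication", "database");        // PGProperty.REPLICATION
        props.setProperty("assumeMinServerVersion", "9.4");  // PGProperty.ASSUME_MIN_SERVER_VERSION
        props.setProperty("preferQueryMode", "simple");      // replication commands use the simple protocol
        try (Connection conn =
            DriverManager.getConnection("jdbc:postgresql://localhost/postgres", props)) {
          conn.unwrap(PGConnection.class)
              .getReplicationAPI()
              .dropReplicationSlot("demo_slot");
        }
      }
    }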
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/PGReplicationStream.java b/pgjdbc/src/main/java/org/postgresql/replication/PGReplicationStream.java
new file mode 100644
index 0000000..cbd06f2
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/replication/PGReplicationStream.java
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.replication;
+
+import org.postgresql.replication.fluent.CommonOptions;
+import org.postgresql.replication.fluent.logical.LogicalReplicationOptions;
+
+import java.nio.ByteBuffer;
+import java.sql.SQLException;
+
+/**
+ * A non-thread-safe replication stream (though certain methods can be safely called by different
+ * threads). After streaming is complete the stream should be closed to free resources on the
+ * backend. Periodic status updates are sent only while the {@link PGReplicationStream#read()}
+ * method is being used, so each WAL record should be processed as quickly as possible:
+ * spending too long on a single record can lead to a timeout disconnect from the server.
+ */
+public interface PGReplicationStream
+ extends AutoCloseable {
+
+ /**
+ * Read the next WAL record from the backend. This method can block until a new message is
+ * received from the server.
+ *
+ * A single WAL record is never split across two XLogData messages. When a WAL record crosses a
+ * WAL page boundary, and is therefore already split using continuation records, it can be split
+ * at the page boundary. In other words, the first main WAL record and its continuation records
+ * can be sent in different XLogData messages.
+ *
+ * @return not null ByteBuffer wrapping the byte array received via the replication protocol;
+ * the buffer may use an offset into the array, so use {@link ByteBuffer#array()} carefully
+ * @throws SQLException when some internal exception occurs during read from stream
+ */
+ ByteBuffer read() throws SQLException;
+
+ /**
+ * Read the next WAL record from the backend. In contrast to {@link
+ * PGReplicationStream#read()}, this method does not block: if no message from the backend is
+ * pending, it returns null. This allows the caller to poll the stream periodically and sleep
+ * between polls, but the sleep time should be less than
+ * {@link CommonOptions#getStatusInterval()} to avoid being disconnected from the server.
+ *
+ * A single WAL record is never split across two XLogData messages. When a WAL record crosses a
+ * WAL page boundary, and is therefore already split using continuation records, it can be split
+ * at the page boundary. In other words, the first main WAL record and its continuation records
+ * can be sent in different XLogData messages.
+ *
+ * @return ByteBuffer wrapping the byte array received via the replication protocol, or null if
+ * no message from the server is pending. The buffer may use an offset into the array, so use
+ * {@link ByteBuffer#array()} carefully.
+ * @throws SQLException when some internal exception occurs during read from stream
+ */
+ ByteBuffer readPending() throws SQLException;
+
+ /**
+ * This value is updated by calls to the {@link PGReplicationStream#read()} method.
+ *
+ * It is safe to call this method in a thread different than the main thread. However, usually this
+ * method is called in the main thread after a successful {@link PGReplicationStream#read()} or
+ * {@link PGReplicationStream#readPending()}, to get the LSN corresponding to the received record.
+ *
+ * @return not null LSN position last received via the {@link PGReplicationStream#read()}
+ * method
+ */
+ LogSequenceNumber getLastReceiveLSN();
+
+ /**
+ * Last flushed LSN sent to the backend in a status update message. This value is updated only
+ * via {@link PGReplicationStream#setFlushedLSN(LogSequenceNumber)}.
+ *
+ * It is safe to call this method in a thread different than the main thread.
+ *
+ * @return not null location of the last WAL flushed to disk in the standby.
+ */
+ LogSequenceNumber getLastFlushedLSN();
+
+ /**
+ * Last applied LSN sent to the backend in a status update message. This value is updated only
+ * via {@link PGReplicationStream#setAppliedLSN(LogSequenceNumber)}.
+ *
+ * It is safe to call this method in a thread different than the main thread.
+ *
+ * @return not null location of the last WAL applied in the standby.
+ */
+ LogSequenceNumber getLastAppliedLSN();
+
+ /**
+ * Set the flushed LSN. This value will be sent to the backend on the next status update
+ * iteration. The flushed LSN position helps the backend determine which WAL can be recycled.
+ *
+ * It is safe to call this method in a thread different than the main thread. The updated value
+ * will be sent to the backend in the next status update run.
+ *
+ * @param flushed not null location of the last WAL flushed to disk in the standby.
+ * @see PGReplicationStream#forceUpdateStatus()
+ */
+ void setFlushedLSN(LogSequenceNumber flushed);
+
+ /**
+ * Inform the backend which LSN has been applied on the standby.
+ * Feedback will be sent to the backend on the next status update iteration.
+ *
+ * It is safe to call this method in a thread different than the main thread. The updated value
+ * will be sent to the backend in the next status update run.
+ *
+ * @param applied not null location of the last WAL applied in the standby.
+ * @see PGReplicationStream#forceUpdateStatus()
+ */
+ void setAppliedLSN(LogSequenceNumber applied);
+
+ /**
+ * Force sending the last received, flushed and applied LSN status to the backend. Normally this
+ * is not needed, because {@link PGReplicationStream} sends the status to the backend
+ * periodically at the interval configured via {@link LogicalReplicationOptions#getStatusInterval}.
+ *
+ * @throws SQLException when some internal exception occurs during read from stream
+ * @see LogicalReplicationOptions#getStatusInterval()
+ */
+ void forceUpdateStatus() throws SQLException;
+
+ /**
+ * @return {@code true} if the replication stream has already been closed, otherwise {@code false}
+ */
+ boolean isClosed();
+
+ /**
+ * Stop streaming replication changes from the server and free backend resources. Afterwards
+ * the connection can be reused for other queries, but the closed stream cannot be used anymore.
+ *
+ * Note: for a logical replication stream on PostgreSQL 9.6 and lower, this method can take a
+ * long time because of a server bug: while decoding a big transaction to logical form, or while
+ * waiting for new changes, PostgreSQL ignores messages from the client. As a workaround you can
+ * close the replication connection instead of closing the replication stream. For more
+ * information about this problem see the mailing list thread
+ * "Stopping logical replication protocol"
+ *
+ * @throws SQLException when some internal exception occurs during end streaming
+ */
+ @Override
+ void close() throws SQLException;
+}
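A consumption-loop sketch tying the methods above together: poll with readPending(), sleep briefly when nothing is pending, and report progress through the flushed/applied LSN setters (the slot name is illustrative; stream-creation boilerplate and exception handling are elided):

    PGReplicationStream stream =
        pgConnection
            .getReplicationAPI()
            .replicationStream()
            .logical()
            .withSlotName("demo_slot")
            .withStatusInterval(10, TimeUnit.SECONDS)
            .start();

    while (true) {
      ByteBuffer msg = stream.readPending();
      if (msg == null) {
        TimeUnit.MILLISECONDS.sleep(10L); // keep well below the status interval
        continue;
      }
      // honor the buffer offset, as the read()/readPending() javadoc warns
      int offset = msg.arrayOffset();
      byte[] source = msg.array();
      int length = source.length - offset;
      System.out.println(new String(source, offset, length));

      // feedback so the server can recycle WAL and track standby progress
      LogSequenceNumber last = stream.getLastReceiveLSN();
      stream.setAppliedLSN(last);
      stream.setFlushedLSN(last);
    }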
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/ReplicationSlotInfo.java b/pgjdbc/src/main/java/org/postgresql/replication/ReplicationSlotInfo.java
new file mode 100644
index 0000000..8c904b3
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/replication/ReplicationSlotInfo.java
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2018, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.replication;
+
+/**
+ * Information returned on replication slot creation.
+ *
+ * Returned keys of CREATE_REPLICATION_SLOT:
+ * <ol>
+ * <li>slot_name String {@code =>} the slot name
+ * <li>consistent_point String {@code =>} LSN at which we became consistent
+ * <li>snapshot_name String {@code =>} exported snapshot's name (may be {@code null})
+ * <li>output_plugin String {@code =>} output plugin (may be {@code null})
+ * </ol>
+ *
+ * @see <a href="https://www.postgresql.org/docs/current/protocol-replication.html">CREATE_REPLICATION_SLOT documentation</a>
+ */
+public final class ReplicationSlotInfo {
+
+ private final String slotName;
+ private final ReplicationType replicationType;
+ private final LogSequenceNumber consistentPoint;
+ private final String snapshotName;
+ private final String outputPlugin;
+
+ public ReplicationSlotInfo(String slotName, ReplicationType replicationType,
+ LogSequenceNumber consistentPoint, String snapshotName,
+ String outputPlugin) {
+ this.slotName = slotName;
+ this.replicationType = replicationType;
+ this.consistentPoint = consistentPoint;
+ this.snapshotName = snapshotName;
+ this.outputPlugin = outputPlugin;
+ }
+
+ /**
+ * Replication slot name.
+ *
+ * @return the slot name
+ */
+ public String getSlotName() {
+ return slotName;
+ }
+
+ /**
+ * Replication type of the slot created, might be PHYSICAL or LOGICAL.
+ *
+ * @return ReplicationType, PHYSICAL or LOGICAL
+ */
+ public ReplicationType getReplicationType() {
+ return replicationType;
+ }
+
+ /**
+ * LSN at which we became consistent.
+ *
+ * @return LogSequenceNumber with the consistent_point
+ */
+ public LogSequenceNumber getConsistentPoint() {
+ return consistentPoint;
+ }
+
+ /**
+ * Exported snapshot name at the point of replication slot creation.
+ *
+ * As long as the exporting transaction remains open, other transactions can import its snapshot,
+ * and thereby be guaranteed that they see exactly the same view of the database that the first
+ * transaction sees.
+ *
+ * @return exported snapshot_name (may be {@code null})
+ */
+ public String getSnapshotName() {
+ return snapshotName;
+ }
+
+ /**
+ * Output Plugin used on slot creation.
+ *
+ * @return output_plugin (may be {@code null})
+ */
+ public String getOutputPlugin() {
+ return outputPlugin;
+ }
+
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/ReplicationType.java b/pgjdbc/src/main/java/org/postgresql/replication/ReplicationType.java
new file mode 100644
index 0000000..ab93bfd
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/replication/ReplicationType.java
@@ -0,0 +1,11 @@
+/*
+ * Copyright (c) 2017, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.replication;
+
+public enum ReplicationType {
+ LOGICAL,
+ PHYSICAL
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/AbstractCreateSlotBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/AbstractCreateSlotBuilder.java
new file mode 100644
index 0000000..807400f
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/AbstractCreateSlotBuilder.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.replication.fluent;
+
+import org.postgresql.core.BaseConnection;
+import org.postgresql.core.ServerVersion;
+import org.postgresql.util.GT;
+
+import java.sql.SQLFeatureNotSupportedException;
+
+public abstract class AbstractCreateSlotBuilder<T extends ChainedCommonCreateSlotBuilder<T>>
+ implements ChainedCommonCreateSlotBuilder<T> {
+
+ protected String slotName;
+ protected boolean temporaryOption;
+ protected BaseConnection connection;
+
+ protected AbstractCreateSlotBuilder(BaseConnection connection) {
+ this.connection = connection;
+ }
+
+ protected abstract T self();
+
+ @Override
+ public T withSlotName(String slotName) {
+ this.slotName = slotName;
+ return self();
+ }
+
+ @Override
+ public T withTemporaryOption() throws SQLFeatureNotSupportedException {
+
+ if (!connection.haveMinimumServerVersion(ServerVersion.v10)) {
+ throw new SQLFeatureNotSupportedException(
+ GT.tr("Server does not support temporary replication slots")
+ );
+ }
+
+ this.temporaryOption = true;
+ return self();
+ }
+}
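The self() hook implements the self-typed builder pattern: T is bound to the concrete fluent interface, so setters declared in this abstract class return the subtype and keep slot-specific methods chainable. An illustrative chain (assuming a BaseConnection named connection, and using the LogicalCreateSlotBuilder added later in this patch):

    ChainedLogicalCreateSlotBuilder builder =
        new LogicalCreateSlotBuilder(connection) // T = ChainedLogicalCreateSlotBuilder
            .withSlotName("demo_slot")           // returns T, not the abstract base type...
            .withOutputPlugin("test_decoding");  // ...so logical-only methods stay available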
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/AbstractStreamBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/AbstractStreamBuilder.java
new file mode 100644
index 0000000..8f08bba
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/AbstractStreamBuilder.java
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.replication.fluent;
+
+import org.postgresql.replication.LogSequenceNumber;
+
+import java.util.concurrent.TimeUnit;
+
+public abstract class AbstractStreamBuilder<T extends ChainedCommonStreamBuilder<T>>
+ implements ChainedCommonStreamBuilder<T> {
+ private static final int DEFAULT_STATUS_INTERVAL = (int) TimeUnit.SECONDS.toMillis(10L);
+ protected int statusIntervalMs = DEFAULT_STATUS_INTERVAL;
+ protected LogSequenceNumber startPosition = LogSequenceNumber.INVALID_LSN;
+ protected String slotName;
+
+ public AbstractStreamBuilder() {
+ }
+
+ protected abstract T self();
+
+ @Override
+ public T withStatusInterval(int time, TimeUnit format) {
+ statusIntervalMs = (int) TimeUnit.MILLISECONDS.convert(time, format);
+ return self();
+ }
+
+ @Override
+ public T withStartPosition(LogSequenceNumber lsn) {
+ this.startPosition = lsn;
+ return self();
+ }
+
+ @Override
+ public T withSlotName(String slotName) {
+ this.slotName = slotName;
+ return self();
+ }
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedCommonCreateSlotBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedCommonCreateSlotBuilder.java
new file mode 100644
index 0000000..4114bef
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedCommonCreateSlotBuilder.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.replication.fluent;
+
+import org.postgresql.replication.ReplicationSlotInfo;
+
+import java.sql.SQLException;
+import java.sql.SQLFeatureNotSupportedException;
+
+/**
+ * Fluent interface for specifying common parameters for creating logical and physical replication slots.
+ */
+public interface ChainedCommonCreateSlotBuilder<T extends ChainedCommonCreateSlotBuilder<T>> {
+
+ /**
+ * Replication slots provide an automated way to ensure that the master does not remove WAL
+ * segments until they have been received by all standbys, and that the master does not remove
+ * rows which could cause a recovery conflict even when the standby is disconnected.
+ *
+ * @param slotName not null unique name of the replication slot to create.
+ * @return T a slot builder
+ */
+ T withSlotName(String slotName);
+
+ /**
+ * Temporary slots are not saved to disk and are automatically dropped on error or when
+ * the session has finished.
+ *
+ * This feature is only supported by PostgreSQL versions >= 10.
+ *
+ * @return T a slot builder
+ * @throws SQLFeatureNotSupportedException thrown if PostgreSQL version is less than 10.
+ */
+ T withTemporaryOption() throws SQLFeatureNotSupportedException;
+
+ /**
+ * Create slot with specified parameters in database.
+ *
+ * @return ReplicationSlotInfo with the information of the created slot.
+ * @throws SQLException on error
+ */
+ ReplicationSlotInfo make() throws SQLException;
+}
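A creation sketch using this interface through the fluent entry points defined elsewhere in this patch (slot and plugin names are illustrative; withTemporaryOption() requires PostgreSQL 10+, and exception handling is elided):

    ReplicationSlotInfo slotInfo =
        pgConnection
            .getReplicationAPI()
            .createReplicationSlot()
            .logical()
            .withSlotName("demo_slot")
            .withOutputPlugin("test_decoding")
            .withTemporaryOption() // dropped automatically at session end
            .make();
    // LSN at which the slot became consistent; streaming can start from here
    System.out.println(slotInfo.getConsistentPoint());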
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedCommonStreamBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedCommonStreamBuilder.java
new file mode 100644
index 0000000..2a41246
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedCommonStreamBuilder.java
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.replication.fluent;
+
+import org.postgresql.replication.LogSequenceNumber;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Fluent interface for specifying common parameters for logical and physical replication.
+ */
+public interface ChainedCommonStreamBuilder<T extends ChainedCommonStreamBuilder<T>> {
+
+ /**
+ * Replication slots provide an automated way to ensure that the master does not remove WAL
+ * segments until they have been received by all standbys, and that the master does not remove
+ * rows which could cause a recovery conflict even when the standby is disconnected.
+ *
+ * @param slotName not null name of a replication slot that already exists on the server.
+ * @return this instance as a fluent interface
+ */
+ T withSlotName(String slotName);
+
+ /**
+ * Specifies the amount of time between status packets sent back to the server. This allows for
+ * easier monitoring of progress from the server. A value of zero disables the periodic status
+ * updates completely, although an update will still be sent when requested by the server, to
+ * avoid timeout disconnect. The default value is 10 seconds.
+ *
+ * @param time positive amount of time
+ * @param format time unit for the specified amount
+ * @return this instance as a fluent interface
+ */
+ T withStatusInterval(int time, TimeUnit format);
+
+ /**
+ * Specify the start position from which the backend will start streaming changes. If this
+ * parameter is not specified, streaming starts from restart_lsn. For more details see the
+ * pg_replication_slots description.
+ *
+ * @param lsn not null position from which to start replicating changes
+ * @return this instance as a fluent interface
+ */
+ T withStartPosition(LogSequenceNumber lsn);
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedCreateReplicationSlotBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedCreateReplicationSlotBuilder.java
new file mode 100644
index 0000000..36e2f0b
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedCreateReplicationSlotBuilder.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.replication.fluent;
+
+import org.postgresql.replication.fluent.logical.ChainedLogicalCreateSlotBuilder;
+import org.postgresql.replication.fluent.physical.ChainedPhysicalCreateSlotBuilder;
+
+/**
+ * Fluent interface for creating logical and physical replication slots.
+ */
+public interface ChainedCreateReplicationSlotBuilder {
+ /**
+ * Get the logical slot builder.
+ * Example usage:
+ *
+ * {@code
+ *
+ * pgConnection
+ * .getReplicationAPI()
+ * .createReplicationSlot()
+ * .logical()
+ * .withSlotName("mySlot")
+ * .withOutputPlugin("test_decoding")
+ * .make();
+ *
+ * PGReplicationStream stream =
+ * pgConnection
+ * .getReplicationAPI()
+ * .replicationStream()
+ * .logical()
+ * .withSlotName("mySlot")
+ * .withSlotOption("include-xids", false)
+ * .withSlotOption("skip-empty-xacts", true)
+ * .start();
+ *
+ * while (true) {
+ * ByteBuffer buffer = stream.read();
+ * //process logical changes
+ * }
+ *
+ * }
+ *
+ * @return not null fluent API
+ */
+ ChainedLogicalCreateSlotBuilder logical();
+
+ /**
+ * Get the physical slot builder, used to create a slot for physical replication (WAL logs in binary form).
+ *
+ * Example usage:
+ *
+ * {@code
+ *
+ * pgConnection
+ * .getReplicationAPI()
+ * .createReplicationSlot()
+ * .physical()
+ * .withSlotName("mySlot")
+ * .make();
+ *
+ * PGReplicationStream stream =
+ * pgConnection
+ * .getReplicationAPI()
+ * .replicationStream()
+ * .physical()
+ * .withSlotName("mySlot")
+ * .start();
+ *
+ * while (true) {
+ * ByteBuffer buffer = stream.read();
+ * //process binary WAL logs
+ * }
+ *
+ * }
+ *
+ *
+ * @return not null fluent API
+ */
+ ChainedPhysicalCreateSlotBuilder physical();
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedStreamBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedStreamBuilder.java
new file mode 100644
index 0000000..58cbd2e
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedStreamBuilder.java
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.replication.fluent;
+
+import org.postgresql.replication.fluent.logical.ChainedLogicalStreamBuilder;
+import org.postgresql.replication.fluent.physical.ChainedPhysicalStreamBuilder;
+
+/**
+ * Start point of the fluent API for building a replication stream (logical or physical).
+ * The API is not thread safe and can be used to create only a single stream.
+ */
+public interface ChainedStreamBuilder {
+ /**
+ * Create a logical replication stream that decodes raw WAL logs into logical form via an
+ * output plugin. For background on logical decoding, see the PostgreSQL documentation on
+ * Logical Decoding Concepts.
+ *
+ *
+ * Example usage:
+ *
+ * {@code
+ *
+ * PGReplicationStream stream =
+ * pgConnection
+ * .getReplicationAPI()
+ * .replicationStream()
+ * .logical()
+ * .withSlotName("test_decoding")
+ * .withSlotOption("include-xids", false)
+ * .withSlotOption("skip-empty-xacts", true)
+ * .start();
+ *
+ * while (true) {
+ * ByteBuffer buffer = stream.read();
+ * //process logical changes
+ * }
+ *
+ * }
+ *
+ *
+ * @return not null fluent API
+ */
+ ChainedLogicalStreamBuilder logical();
+
+ /**
+ * Create a physical replication stream for processing WAL logs in binary form.
+ *
+ * Example usage:
+ *
+ * {@code
+ *
+ * LogSequenceNumber lsn = getCurrentLSN();
+ *
+ * PGReplicationStream stream =
+ * pgConnection
+ * .getReplicationAPI()
+ * .replicationStream()
+ * .physical()
+ * .withStartPosition(lsn)
+ * .start();
+ *
+ * while (true) {
+ * ByteBuffer buffer = stream.read();
+ * //process binary WAL logs
+ * }
+ *
+ * }
+ *
+ *
+ * @return not null fluent API
+ */
+ ChainedPhysicalStreamBuilder physical();
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/CommonOptions.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/CommonOptions.java
new file mode 100644
index 0000000..6eacbee
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/CommonOptions.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.replication.fluent;
+
+import org.postgresql.replication.LogSequenceNumber;
+
+/**
+ * Common parameters for logical and physical replication.
+ */
+public interface CommonOptions {
+ /**
+ * Replication slots provide an automated way to ensure that the master does not remove WAL
+ * segments until they have been received by all standbys, and that the master does not remove
+ * rows which could cause a recovery conflict even when the standby is disconnected.
+ *
+ * @return nullable name of a replication slot that already exists on the server and is free.
+ */
+ String getSlotName();
+
+ /**
+ * @return the position to start replication. This cannot be null.
+ */
+ LogSequenceNumber getStartLSNPosition();
+
+ /**
+ * Specifies the number of milliseconds between status packets sent back to the server. This allows
+ * for easier monitoring of progress from the server. A value of zero disables the periodic status
+ * updates completely, although an update will still be sent when requested by the server, to
+ * avoid timeout disconnect. The default value is 10 seconds.
+ *
+ * @return the current status interval in milliseconds
+ */
+ int getStatusInterval();
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/ReplicationCreateSlotBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ReplicationCreateSlotBuilder.java
new file mode 100644
index 0000000..e0067a3
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ReplicationCreateSlotBuilder.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.replication.fluent;
+
+import org.postgresql.core.BaseConnection;
+import org.postgresql.replication.fluent.logical.ChainedLogicalCreateSlotBuilder;
+import org.postgresql.replication.fluent.logical.LogicalCreateSlotBuilder;
+import org.postgresql.replication.fluent.physical.ChainedPhysicalCreateSlotBuilder;
+import org.postgresql.replication.fluent.physical.PhysicalCreateSlotBuilder;
+
+public class ReplicationCreateSlotBuilder implements ChainedCreateReplicationSlotBuilder {
+ private final BaseConnection baseConnection;
+
+ public ReplicationCreateSlotBuilder(BaseConnection baseConnection) {
+ this.baseConnection = baseConnection;
+ }
+
+ @Override
+ public ChainedLogicalCreateSlotBuilder logical() {
+ return new LogicalCreateSlotBuilder(baseConnection);
+ }
+
+ @Override
+ public ChainedPhysicalCreateSlotBuilder physical() {
+ return new PhysicalCreateSlotBuilder(baseConnection);
+ }
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/ReplicationStreamBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ReplicationStreamBuilder.java
new file mode 100644
index 0000000..4d8443a
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ReplicationStreamBuilder.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.replication.fluent;
+
+import org.postgresql.core.BaseConnection;
+import org.postgresql.core.ReplicationProtocol;
+import org.postgresql.replication.PGReplicationStream;
+import org.postgresql.replication.fluent.logical.ChainedLogicalStreamBuilder;
+import org.postgresql.replication.fluent.logical.LogicalReplicationOptions;
+import org.postgresql.replication.fluent.logical.LogicalStreamBuilder;
+import org.postgresql.replication.fluent.logical.StartLogicalReplicationCallback;
+import org.postgresql.replication.fluent.physical.ChainedPhysicalStreamBuilder;
+import org.postgresql.replication.fluent.physical.PhysicalReplicationOptions;
+import org.postgresql.replication.fluent.physical.PhysicalStreamBuilder;
+import org.postgresql.replication.fluent.physical.StartPhysicalReplicationCallback;
+
+import java.sql.SQLException;
+
+public class ReplicationStreamBuilder implements ChainedStreamBuilder {
+ private final BaseConnection baseConnection;
+
+ /**
+ * @param connection not null connection with which the replication will be associated
+ */
+ public ReplicationStreamBuilder(final BaseConnection connection) {
+ this.baseConnection = connection;
+ }
+
+ @Override
+ public ChainedLogicalStreamBuilder logical() {
+ return new LogicalStreamBuilder(new StartLogicalReplicationCallback() {
+ @Override
+ public PGReplicationStream start(LogicalReplicationOptions options) throws SQLException {
+ ReplicationProtocol protocol = baseConnection.getReplicationProtocol();
+ return protocol.startLogical(options);
+ }
+ });
+ }
+
+ @Override
+ public ChainedPhysicalStreamBuilder physical() {
+ return new PhysicalStreamBuilder(new StartPhysicalReplicationCallback() {
+ @Override
+ public PGReplicationStream start(PhysicalReplicationOptions options) throws SQLException {
+ ReplicationProtocol protocol = baseConnection.getReplicationProtocol();
+ return protocol.startPhysical(options);
+ }
+ });
+ }
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/ChainedLogicalCreateSlotBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/ChainedLogicalCreateSlotBuilder.java
new file mode 100644
index 0000000..cae77bb
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/ChainedLogicalCreateSlotBuilder.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.replication.fluent.logical;
+
+import org.postgresql.replication.fluent.ChainedCommonCreateSlotBuilder;
+
+/**
+ * Logical replication slot specific parameters.
+ */
+public interface ChainedLogicalCreateSlotBuilder
+ extends ChainedCommonCreateSlotBuilder<ChainedLogicalCreateSlotBuilder> {
+
+ /**
+ * Output plugin that should be used to decode the physical WAL representation into some
+ * logical form. The output plugin should be installed on the server (i.e. exist in
+ * shared_preload_libraries).
+ *
+ * The postgresql-contrib package provides the sample output plugin test_decoding, which can
+ * be used to test the logical replication API.
+ *
+ * @param outputPlugin not null name of the output plugin used for logical decoding
+ * @return the logical slot builder
+ */
+ ChainedLogicalCreateSlotBuilder withOutputPlugin(String outputPlugin);
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/ChainedLogicalStreamBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/ChainedLogicalStreamBuilder.java
new file mode 100644
index 0000000..0dc60b9
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/ChainedLogicalStreamBuilder.java
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.replication.fluent.logical;
+
+import org.postgresql.replication.PGReplicationStream;
+import org.postgresql.replication.fluent.ChainedCommonStreamBuilder;
+
+import java.sql.SQLException;
+import java.util.Properties;
+
+public interface ChainedLogicalStreamBuilder
+ extends ChainedCommonStreamBuilder<ChainedLogicalStreamBuilder> {
+ /**
+ * Open logical replication stream.
+ *
+ * @return not null PGReplicationStream available for fetching data in logical form
+ * @throws SQLException if there are errors
+ */
+ PGReplicationStream start() throws SQLException;
+
+ /**
+ * Set a replication slot option with a boolean value.
+ *
+ * @param optionName name of option
+ * @param optionValue boolean value
+ * @return ChainedLogicalStreamBuilder
+ */
+ ChainedLogicalStreamBuilder withSlotOption(String optionName, boolean optionValue);
+
+ /**
+ * Set a replication slot option with an integer value.
+ *
+ * @param optionName name of option
+ * @param optionValue integer value
+ * @return ChainedLogicalStreamBuilder
+ */
+ ChainedLogicalStreamBuilder withSlotOption(String optionName, int optionValue);
+
+ /**
+ * Set a replication slot option with a String value.
+ *
+ * @param optionName name of option
+ * @param optionValue String value
+ * @return ChainedLogicalStreamBuilder
+ */
+ ChainedLogicalStreamBuilder withSlotOption(String optionName, String optionValue);
+
+ /**
+ * Set multiple replication slot options from the given properties.
+ *
+ * @param options properties
+ * @return ChainedLogicalStreamBuilder
+ */
+ ChainedLogicalStreamBuilder withSlotOptions(Properties options);
+
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/LogicalCreateSlotBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/LogicalCreateSlotBuilder.java
new file mode 100644
index 0000000..0688822
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/LogicalCreateSlotBuilder.java
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.replication.fluent.logical;
+
+import org.postgresql.core.BaseConnection;
+import org.postgresql.replication.LogSequenceNumber;
+import org.postgresql.replication.ReplicationSlotInfo;
+import org.postgresql.replication.ReplicationType;
+import org.postgresql.replication.fluent.AbstractCreateSlotBuilder;
+import org.postgresql.util.GT;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+
+public class LogicalCreateSlotBuilder
+ extends AbstractCreateSlotBuilder<ChainedLogicalCreateSlotBuilder>
+ implements ChainedLogicalCreateSlotBuilder {
+
+ private String outputPlugin;
+
+ public LogicalCreateSlotBuilder(BaseConnection connection) {
+ super(connection);
+ }
+
+ @Override
+ protected ChainedLogicalCreateSlotBuilder self() {
+ return this;
+ }
+
+ @Override
+ public ChainedLogicalCreateSlotBuilder withOutputPlugin(String outputPlugin) {
+ this.outputPlugin = outputPlugin;
+ return self();
+ }
+
+ @Override
+ public ReplicationSlotInfo make() throws SQLException {
+ String outputPlugin = this.outputPlugin;
+ if (outputPlugin == null || outputPlugin.isEmpty()) {
+ throw new IllegalArgumentException(
+ "OutputPlugin required parameter for logical replication slot");
+ }
+
+ if (slotName == null || slotName.isEmpty()) {
+ throw new IllegalArgumentException("Replication slotName can't be null");
+ }
+
+ Statement statement = connection.createStatement();
+ ResultSet result = null;
+ ReplicationSlotInfo slotInfo = null;
+ try {
+ String sql = String.format(
+ "CREATE_REPLICATION_SLOT %s %s LOGICAL %s",
+ slotName,
+ temporaryOption ? "TEMPORARY" : "",
+ outputPlugin
+ );
+ statement.execute(sql);
+ result = statement.getResultSet();
+ if (result != null && result.next()) {
+ slotInfo = new ReplicationSlotInfo(
+ result.getString("slot_name"),
+ ReplicationType.LOGICAL,
+ LogSequenceNumber.valueOf(result.getString("consistent_point")),
+ result.getString("snapshot_name"),
+ result.getString("output_plugin"));
+ } else {
+ throw new PSQLException(
+ GT.tr("{0} returned no results"),
+ PSQLState.OBJECT_NOT_IN_STATE);
+ }
+ } finally {
+ if (result != null) {
+ result.close();
+ }
+ statement.close();
+ }
+ return slotInfo;
+ }
+}
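For concreteness, with slotName "demo_slot", temporaryOption set and outputPlugin "test_decoding" (illustrative values), the String.format call in make() issues the replication command

    CREATE_REPLICATION_SLOT demo_slot TEMPORARY LOGICAL test_decoding

and builds the ReplicationSlotInfo from the slot_name, consistent_point, snapshot_name and output_plugin columns of the single-row result.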
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/LogicalReplicationOptions.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/LogicalReplicationOptions.java
new file mode 100644
index 0000000..8f1ef01
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/LogicalReplicationOptions.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.replication.fluent.logical;
+
+import org.postgresql.replication.fluent.CommonOptions;
+
+import java.util.Properties;
+
+public interface LogicalReplicationOptions extends CommonOptions {
+ /**
+ * Required parameter for logical replication.
+ *
+ * @return not null logical replication slot name that already exists on the server and is free.
+ */
+ @Override
+ String getSlotName();
+
+ /**
+ * Parameters for the output plugin. These parameters will be passed to the output plugin
+ * registered for the specified replication slot.
+ *
+ * @return options that will be passed to the output_plugin with which the replication slot was created
+ */
+ Properties getSlotOptions();
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/LogicalStreamBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/LogicalStreamBuilder.java
new file mode 100644
index 0000000..f8a1bcf
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/LogicalStreamBuilder.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.replication.fluent.logical;
+
+import org.postgresql.replication.LogSequenceNumber;
+import org.postgresql.replication.PGReplicationStream;
+import org.postgresql.replication.fluent.AbstractStreamBuilder;
+
+import java.sql.SQLException;
+import java.util.Properties;
+
+public class LogicalStreamBuilder extends AbstractStreamBuilder<ChainedLogicalStreamBuilder>
+ implements ChainedLogicalStreamBuilder, LogicalReplicationOptions {
+ private final Properties slotOptions;
+
+ private final StartLogicalReplicationCallback startCallback;
+
+ /**
+ * @param startCallback not null callback to be executed once the parameters for starting
+ * replication have been built
+ */
+ public LogicalStreamBuilder(StartLogicalReplicationCallback startCallback) {
+ this.startCallback = startCallback;
+ this.slotOptions = new Properties();
+ }
+
+ @Override
+ protected ChainedLogicalStreamBuilder self() {
+ return this;
+ }
+
+ @Override
+ public PGReplicationStream start() throws SQLException {
+ return startCallback.start(this);
+ }
+
+ @Override
+ public String getSlotName() {
+ return slotName;
+ }
+
+ @Override
+ public ChainedLogicalStreamBuilder withStartPosition(LogSequenceNumber lsn) {
+ startPosition = lsn;
+ return this;
+ }
+
+ @Override
+ public ChainedLogicalStreamBuilder withSlotOption(String optionName, boolean optionValue) {
+ slotOptions.setProperty(optionName, String.valueOf(optionValue));
+ return this;
+ }
+
+ @Override
+ public ChainedLogicalStreamBuilder withSlotOption(String optionName, int optionValue) {
+ slotOptions.setProperty(optionName, String.valueOf(optionValue));
+ return this;
+ }
+
+ @Override
+ public ChainedLogicalStreamBuilder withSlotOption(String optionName, String optionValue) {
+ slotOptions.setProperty(optionName, optionValue);
+ return this;
+ }
+
+ @Override
+ public ChainedLogicalStreamBuilder withSlotOptions(Properties options) {
+ for (String propertyName : options.stringPropertyNames()) {
+ slotOptions.setProperty(propertyName, options.getProperty(propertyName));
+ }
+ return this;
+ }
+
+ @Override
+ public LogSequenceNumber getStartLSNPosition() {
+ return startPosition;
+ }
+
+ @Override
+ public Properties getSlotOptions() {
+ return slotOptions;
+ }
+
+ @Override
+ public int getStatusInterval() {
+ return statusIntervalMs;
+ }
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/StartLogicalReplicationCallback.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/StartLogicalReplicationCallback.java
new file mode 100644
index 0000000..8612eca
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/StartLogicalReplicationCallback.java
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.replication.fluent.logical;
+
+import org.postgresql.replication.PGReplicationStream;
+
+import java.sql.SQLException;
+
+public interface StartLogicalReplicationCallback {
+ PGReplicationStream start(LogicalReplicationOptions options) throws SQLException;
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/ChainedPhysicalCreateSlotBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/ChainedPhysicalCreateSlotBuilder.java
new file mode 100644
index 0000000..8fdc810
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/ChainedPhysicalCreateSlotBuilder.java
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.replication.fluent.physical;
+
+import org.postgresql.replication.fluent.ChainedCommonCreateSlotBuilder;
+
+/**
+ * Physical replication slot specific parameters.
+ */
+public interface ChainedPhysicalCreateSlotBuilder extends
+ ChainedCommonCreateSlotBuilder<ChainedPhysicalCreateSlotBuilder> {
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/ChainedPhysicalStreamBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/ChainedPhysicalStreamBuilder.java
new file mode 100644
index 0000000..f458c88
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/ChainedPhysicalStreamBuilder.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.replication.fluent.physical;
+
+import org.postgresql.replication.PGReplicationStream;
+import org.postgresql.replication.fluent.ChainedCommonStreamBuilder;
+
+import java.sql.SQLException;
+
+public interface ChainedPhysicalStreamBuilder extends
+ ChainedCommonStreamBuilder<ChainedPhysicalStreamBuilder> {
+
+ /**
+ * Open physical replication stream.
+ *
+ * @return not null PGReplicationStream available for fetching WAL logs in binary form
+ * @throws SQLException on error
+ */
+ PGReplicationStream start() throws SQLException;
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/PhysicalCreateSlotBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/PhysicalCreateSlotBuilder.java
new file mode 100644
index 0000000..4c2597b
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/PhysicalCreateSlotBuilder.java
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.replication.fluent.physical;
+
+import org.postgresql.core.BaseConnection;
+import org.postgresql.replication.LogSequenceNumber;
+import org.postgresql.replication.ReplicationSlotInfo;
+import org.postgresql.replication.ReplicationType;
+import org.postgresql.replication.fluent.AbstractCreateSlotBuilder;
+import org.postgresql.util.GT;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+
+public class PhysicalCreateSlotBuilder
+ extends AbstractCreateSlotBuilder<ChainedPhysicalCreateSlotBuilder>
+ implements ChainedPhysicalCreateSlotBuilder {
+
+ public PhysicalCreateSlotBuilder(BaseConnection connection) {
+ super(connection);
+ }
+
+ @Override
+ protected ChainedPhysicalCreateSlotBuilder self() {
+ return this;
+ }
+
+ @Override
+ public ReplicationSlotInfo make() throws SQLException {
+ if (slotName == null || slotName.isEmpty()) {
+ throw new IllegalArgumentException("Replication slotName can't be null");
+ }
+
+ Statement statement = connection.createStatement();
+ ResultSet result = null;
+ ReplicationSlotInfo slotInfo = null;
+ try {
+ String sql = String.format(
+ "CREATE_REPLICATION_SLOT %s %s PHYSICAL",
+ slotName,
+ temporaryOption ? "TEMPORARY" : ""
+ );
+ statement.execute(sql);
+ result = statement.getResultSet();
+ if (result != null && result.next()) {
+ slotInfo = new ReplicationSlotInfo(
+ result.getString("slot_name"),
+ ReplicationType.PHYSICAL,
+ LogSequenceNumber.valueOf(result.getString("consistent_point")),
+ result.getString("snapshot_name"),
+ result.getString("output_plugin"));
+ } else {
+ throw new PSQLException(
+ GT.tr("{0} returned no results"),
+ PSQLState.OBJECT_NOT_IN_STATE);
+ }
+ } finally {
+ if (result != null) {
+ result.close();
+ }
+ statement.close();
+ }
+ return slotInfo;
+ }
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/PhysicalReplicationOptions.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/PhysicalReplicationOptions.java
new file mode 100644
index 0000000..58326d9
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/PhysicalReplicationOptions.java
@@ -0,0 +1,11 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.replication.fluent.physical;
+
+import org.postgresql.replication.fluent.CommonOptions;
+
+public interface PhysicalReplicationOptions extends CommonOptions {
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/PhysicalStreamBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/PhysicalStreamBuilder.java
new file mode 100644
index 0000000..eb177d0
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/PhysicalStreamBuilder.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.replication.fluent.physical;
+
+import org.postgresql.replication.LogSequenceNumber;
+import org.postgresql.replication.PGReplicationStream;
+import org.postgresql.replication.fluent.AbstractStreamBuilder;
+
+import java.sql.SQLException;
+
+public class PhysicalStreamBuilder extends AbstractStreamBuilder<ChainedPhysicalStreamBuilder>
+ implements ChainedPhysicalStreamBuilder, PhysicalReplicationOptions {
+
+ private final StartPhysicalReplicationCallback startCallback;
+
+ /**
+ * @param startCallback not null callback to be executed once the parameters for starting
+ * replication have been built
+ */
+ public PhysicalStreamBuilder(StartPhysicalReplicationCallback startCallback) {
+ this.startCallback = startCallback;
+ }
+
+ @Override
+ protected ChainedPhysicalStreamBuilder self() {
+ return this;
+ }
+
+ @Override
+ public PGReplicationStream start() throws SQLException {
+ return this.startCallback.start(this);
+ }
+
+ @Override
+ public String getSlotName() {
+ return slotName;
+ }
+
+ @Override
+ public LogSequenceNumber getStartLSNPosition() {
+ return startPosition;
+ }
+
+ @Override
+ public int getStatusInterval() {
+ return statusIntervalMs;
+ }
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/StartPhysicalReplicationCallback.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/StartPhysicalReplicationCallback.java
new file mode 100644
index 0000000..543edcb
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/StartPhysicalReplicationCallback.java
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.replication.fluent.physical;
+
+import org.postgresql.replication.PGReplicationStream;
+
+import java.sql.SQLException;
+
+public interface StartPhysicalReplicationCallback {
+ PGReplicationStream start(PhysicalReplicationOptions options) throws SQLException;
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/scram/ScramAuthenticator.java b/pgjdbc/src/main/java/org/postgresql/scram/ScramAuthenticator.java
new file mode 100644
index 0000000..f673e34
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/scram/ScramAuthenticator.java
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2017, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.scram;
+
+import org.postgresql.core.PGStream;
+import org.postgresql.util.GT;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
+
+import com.ongres.scram.client.ScramClient;
+import com.ongres.scram.client.ScramSession;
+import com.ongres.scram.common.exception.ScramException;
+import com.ongres.scram.common.exception.ScramInvalidServerSignatureException;
+import com.ongres.scram.common.exception.ScramParseException;
+import com.ongres.scram.common.exception.ScramServerErrorException;
+import com.ongres.scram.common.stringprep.StringPreparations;
+
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+public class ScramAuthenticator {
+ private static final Logger LOGGER = Logger.getLogger(ScramAuthenticator.class.getName());
+
+ private final String user;
+ private final String password;
+ private final PGStream pgStream;
+ private ScramClient scramClient;
+ private ScramSession scramSession;
+ private ScramSession.ClientFinalProcessor clientFinalProcessor;
+
+ private interface BodySender {
+ void sendBody(PGStream pgStream) throws IOException;
+ }
+
+ private void sendAuthenticationMessage(int bodyLength, BodySender bodySender)
+ throws IOException {
+ pgStream.sendChar('p');
+ pgStream.sendInteger4(Integer.SIZE / Byte.SIZE + bodyLength);
+ bodySender.sendBody(pgStream);
+ pgStream.flush();
+ }
+
+ public ScramAuthenticator(String user, String password, PGStream pgStream) {
+ this.user = user;
+ this.password = password;
+ this.pgStream = pgStream;
+ }
+
+ public void processServerMechanismsAndInit() throws IOException, PSQLException {
+ List<String> mechanisms = new ArrayList<>();
+ do {
+ mechanisms.add(pgStream.receiveString());
+ } while (pgStream.peekChar() != 0);
+ int c = pgStream.receiveChar();
+ assert c == 0;
+ if (mechanisms.isEmpty()) {
+ throw new PSQLException(
+ GT.tr("No SCRAM mechanism(s) advertised by the server"),
+ PSQLState.CONNECTION_REJECTED
+ );
+ }
+
+ ScramClient scramClient;
+ try {
+ scramClient = ScramClient
+ .channelBinding(ScramClient.ChannelBinding.NO)
+ .stringPreparation(StringPreparations.SASL_PREPARATION)
+ .selectMechanismBasedOnServerAdvertised(mechanisms.toArray(new String[]{}))
+ .setup();
+ } catch (IllegalArgumentException e) {
+ throw new PSQLException(
+ GT.tr("Invalid or unsupported by client SCRAM mechanisms", e),
+ PSQLState.CONNECTION_REJECTED
+ );
+ }
+ if (LOGGER.isLoggable(Level.FINEST)) {
+ LOGGER.log(Level.FINEST, " Using SCRAM mechanism {0}", scramClient.getScramMechanism().getName());
+ }
+
+ this.scramClient = scramClient;
+ scramSession =
+ scramClient.scramSession("*"); // Real username is ignored by server, uses startup one
+ }
+
+ public void sendScramClientFirstMessage() throws IOException {
+ ScramSession scramSession = this.scramSession;
+ String clientFirstMessage = scramSession.clientFirstMessage();
+ LOGGER.log(Level.FINEST, " FE=> SASLInitialResponse( {0} )", clientFirstMessage);
+
+ ScramClient scramClient = this.scramClient;
+ String scramMechanismName = scramClient.getScramMechanism().getName();
+ final byte[] scramMechanismNameBytes = scramMechanismName.getBytes(StandardCharsets.UTF_8);
+ final byte[] clientFirstMessageBytes = clientFirstMessage.getBytes(StandardCharsets.UTF_8);
+ sendAuthenticationMessage(
+ (scramMechanismNameBytes.length + 1) + 4 + clientFirstMessageBytes.length,
+ new BodySender() {
+ @Override
+ public void sendBody(PGStream pgStream) throws IOException {
+ pgStream.send(scramMechanismNameBytes);
+ pgStream.sendChar(0); // List terminated in '\0'
+ pgStream.sendInteger4(clientFirstMessageBytes.length);
+ pgStream.send(clientFirstMessageBytes);
+ }
+ }
+ );
+ }
+
+ public void processServerFirstMessage(int length) throws IOException, PSQLException {
+ String serverFirstMessage = pgStream.receiveString(length);
+ LOGGER.log(Level.FINEST, " <=BE AuthenticationSASLContinue( {0} )", serverFirstMessage);
+
+ ScramSession scramSession = this.scramSession;
+ if (scramSession == null) {
+ throw new PSQLException(
+ GT.tr("SCRAM session does not exist"),
+ PSQLState.UNKNOWN_STATE
+ );
+ }
+
+ ScramSession.ServerFirstProcessor serverFirstProcessor;
+ try {
+ serverFirstProcessor = scramSession.receiveServerFirstMessage(serverFirstMessage);
+ } catch (ScramException e) {
+ throw new PSQLException(
+ GT.tr("Invalid server-first-message: {0}", serverFirstMessage),
+ PSQLState.CONNECTION_REJECTED,
+ e
+ );
+ }
+ if (LOGGER.isLoggable(Level.FINEST)) {
+ LOGGER.log(Level.FINEST,
+ " <=BE AuthenticationSASLContinue(salt={0}, iterations={1})",
+ new Object[]{serverFirstProcessor.getSalt(), serverFirstProcessor.getIteration()}
+ );
+ }
+
+ clientFinalProcessor = serverFirstProcessor.clientFinalProcessor(password);
+
+ String clientFinalMessage = clientFinalProcessor.clientFinalMessage();
+ LOGGER.log(Level.FINEST, " FE=> SASLResponse( {0} )", clientFinalMessage);
+
+ final byte[] clientFinalMessageBytes = clientFinalMessage.getBytes(StandardCharsets.UTF_8);
+ sendAuthenticationMessage(
+ clientFinalMessageBytes.length,
+ new BodySender() {
+ @Override
+ public void sendBody(PGStream pgStream) throws IOException {
+ pgStream.send(clientFinalMessageBytes);
+ }
+ }
+ );
+ }
+
+ public void verifyServerSignature(int length) throws IOException, PSQLException {
+ String serverFinalMessage = pgStream.receiveString(length);
+ LOGGER.log(Level.FINEST, " <=BE AuthenticationSASLFinal( {0} )", serverFinalMessage);
+
+ ScramSession.ClientFinalProcessor clientFinalProcessor = this.clientFinalProcessor;
+ if (clientFinalProcessor == null) {
+ throw new PSQLException(
+ GT.tr("SCRAM client final processor does not exist"),
+ PSQLState.UNKNOWN_STATE
+ );
+ }
+ try {
+ clientFinalProcessor.receiveServerFinalMessage(serverFinalMessage);
+ } catch (ScramParseException e) {
+ throw new PSQLException(
+ GT.tr("Invalid server-final-message: {0}", serverFinalMessage),
+ PSQLState.CONNECTION_REJECTED,
+ e
+ );
+ } catch (ScramServerErrorException e) {
+ throw new PSQLException(
+ GT.tr("SCRAM authentication failed, server returned error: {0}",
+ e.getError().getErrorMessage()),
+ PSQLState.CONNECTION_REJECTED,
+ e
+ );
+ } catch (ScramInvalidServerSignatureException e) {
+ throw new PSQLException(
+ GT.tr("Invalid server SCRAM signature"),
+ PSQLState.CONNECTION_REJECTED,
+ e
+ );
+ }
+ }
+}
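
ScramAuthenticator is driven by the connection startup code as SASL messages arrive; the class itself does not loop over the protocol. A hedged sketch of the expected call order (the class and method names are from the file above; the message-length bookkeeping and the AuthenticationSASL/Continue/Final dispatch live in the hypothetical caller):

    import org.postgresql.core.PGStream;
    import org.postgresql.scram.ScramAuthenticator;

    final class ScramFlowSketch {
      // Lengths are the SASL message body sizes the protocol reader has already parsed.
      static void authenticate(PGStream pgStream, String user, String password,
          int saslContinueLength, int saslFinalLength) throws Exception {
        ScramAuthenticator scram = new ScramAuthenticator(user, password, pgStream);
        // AuthenticationSASL: server advertises its mechanism list
        scram.processServerMechanismsAndInit();
        scram.sendScramClientFirstMessage();
        // AuthenticationSASLContinue: server-first-message (salt, iterations, nonce)
        scram.processServerFirstMessage(saslContinueLength);
        // AuthenticationSASLFinal: verify the server signature to authenticate the server
        scram.verifyServerSignature(saslFinalLength);
      }
    }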
diff --git a/pgjdbc/src/main/java/org/postgresql/ssl/DbKeyStoreSocketFactory.java b/pgjdbc/src/main/java/org/postgresql/ssl/DbKeyStoreSocketFactory.java
new file mode 100644
index 0000000..d7e375f
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/ssl/DbKeyStoreSocketFactory.java
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2004, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.ssl;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.security.GeneralSecurityException;
+import java.security.KeyStore;
+
+import javax.net.ssl.KeyManagerFactory;
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.TrustManagerFactory;
+
+public abstract class DbKeyStoreSocketFactory extends WrappedFactory {
+ /*
+ * Populate the WrappedFactory member factory with an SSL Socket Factory that uses the JKS
+ * keystore provided by getKeyStorePassword() and getKeyStoreStream(). A subclass only needs to
+ * implement these two methods. The key store will be used both for selecting a private key
+ * certificate to send to the server, as well as checking the server's certificate against a set
+ * of trusted CAs.
+ */
+ public DbKeyStoreSocketFactory() throws DbKeyStoreSocketException {
+ KeyStore keys;
+ char[] password;
+ try {
+ keys = KeyStore.getInstance("JKS");
+ // Call of the sub-class method during object initialization is generally a bad idea
+ password = getKeyStorePassword();
+ keys.load(getKeyStoreStream(), password);
+ } catch (GeneralSecurityException gse) {
+ throw new DbKeyStoreSocketException("Failed to load keystore: " + gse.getMessage());
+ } catch (FileNotFoundException fnfe) {
+ throw new DbKeyStoreSocketException("Failed to find keystore file." + fnfe.getMessage());
+ } catch (IOException ioe) {
+ throw new DbKeyStoreSocketException("Failed to read keystore file: " + ioe.getMessage());
+ }
+ try {
+ KeyManagerFactory keyfact =
+ KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
+ keyfact.init(keys, password);
+
+ TrustManagerFactory trustfact =
+ TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
+ trustfact.init(keys);
+
+ SSLContext ctx = SSLContext.getInstance("SSL");
+ ctx.init(keyfact.getKeyManagers(), trustfact.getTrustManagers(), null);
+ factory = ctx.getSocketFactory();
+ } catch (GeneralSecurityException gse) {
+ throw new DbKeyStoreSocketException(
+ "Failed to set up database socket factory: " + gse.getMessage());
+ }
+ }
+
+ public abstract char[] getKeyStorePassword();
+
+ public abstract InputStream getKeyStoreStream();
+
+ @SuppressWarnings("serial")
+ public static class DbKeyStoreSocketException extends Exception {
+ public DbKeyStoreSocketException(String message) {
+ super(message);
+ }
+ }
+}
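
Since the two abstract methods are the whole contract, a subclass can be as small as the sketch below; the class name, resource path, and password are illustrative assumptions. It would then be selected through the sslfactory connection property using its fully qualified name.

    import java.io.InputStream;

    import org.postgresql.ssl.DbKeyStoreSocketFactory;

    public class ClasspathKeyStoreSocketFactory extends DbKeyStoreSocketFactory {
      public ClasspathKeyStoreSocketFactory() throws DbKeyStoreSocketException {
        super(); // loads the keystore via the two overrides below
      }

      @Override
      public char[] getKeyStorePassword() {
        return "changeit".toCharArray(); // assumed password
      }

      @Override
      public InputStream getKeyStoreStream() {
        // assumed classpath resource holding both the client key and trusted CAs
        return ClasspathKeyStoreSocketFactory.class.getResourceAsStream("/client.jks");
      }
    }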
diff --git a/pgjdbc/src/main/java/org/postgresql/ssl/DefaultJavaSSLFactory.java b/pgjdbc/src/main/java/org/postgresql/ssl/DefaultJavaSSLFactory.java
new file mode 100644
index 0000000..a772e18
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/ssl/DefaultJavaSSLFactory.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2017, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.ssl;
+
+import java.util.Properties;
+
+import javax.net.ssl.SSLSocketFactory;
+
+/**
+ * Socket factory that uses Java's default truststore to validate the server certificate.
+ * Note: it always validates the server certificate, so it may cause a downgrade to a
+ * non-encrypted connection when the default truststore lacks the certificates needed to
+ * validate the server.
+ */
+public class DefaultJavaSSLFactory extends WrappedFactory {
+ public DefaultJavaSSLFactory(Properties info) {
+ factory = (SSLSocketFactory) SSLSocketFactory.getDefault();
+ }
+}
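
A usage sketch, with hypothetical host and credentials; only the sslfactory class name is taken from the file above:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Properties;

    public class DefaultTruststoreDemo {
      public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.setProperty("user", "app");        // assumed credentials
        props.setProperty("password", "secret");
        props.setProperty("ssl", "true");
        props.setProperty("sslfactory", "org.postgresql.ssl.DefaultJavaSSLFactory");
        try (Connection con = DriverManager.getConnection(
            "jdbc:postgresql://db.example.com/app", props)) {
          // the certificate chain is checked against the JRE's default truststore (cacerts)
          System.out.println(con.isValid(5));
        }
      }
    }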
diff --git a/pgjdbc/src/main/java/org/postgresql/ssl/LazyKeyManager.java b/pgjdbc/src/main/java/org/postgresql/ssl/LazyKeyManager.java
new file mode 100644
index 0000000..e3c46b5
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/ssl/LazyKeyManager.java
@@ -0,0 +1,297 @@
+/*
+ * Copyright (c) 2004, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.ssl;
+
+import org.postgresql.util.GT;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
+
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.net.Socket;
+import java.security.AlgorithmParameters;
+import java.security.GeneralSecurityException;
+import java.security.Key;
+import java.security.KeyFactory;
+import java.security.NoSuchAlgorithmException;
+import java.security.Principal;
+import java.security.PrivateKey;
+import java.security.cert.Certificate;
+import java.security.cert.CertificateException;
+import java.security.cert.CertificateFactory;
+import java.security.cert.X509Certificate;
+import java.security.spec.InvalidKeySpecException;
+import java.security.spec.KeySpec;
+import java.security.spec.PKCS8EncodedKeySpec;
+import java.util.Collection;
+
+import javax.crypto.Cipher;
+import javax.crypto.EncryptedPrivateKeyInfo;
+import javax.crypto.NoSuchPaddingException;
+import javax.crypto.SecretKeyFactory;
+import javax.crypto.spec.PBEKeySpec;
+import javax.net.ssl.X509KeyManager;
+import javax.security.auth.callback.Callback;
+import javax.security.auth.callback.CallbackHandler;
+import javax.security.auth.callback.PasswordCallback;
+import javax.security.auth.callback.UnsupportedCallbackException;
+import javax.security.auth.x500.X500Principal;
+
+/**
+ * A Key manager that only loads the keys, if necessary.
+ */
+public class LazyKeyManager implements X509KeyManager {
+ private X509Certificate[] cert;
+ private PrivateKey key;
+ private final String certfile;
+ private final String keyfile;
+ private final CallbackHandler cbh;
+ private final boolean defaultfile;
+ private PSQLException error;
+
+ /**
+ * Constructor. certfile and keyfile can be null, in that case no certificate is presented to the
+ * server.
+ *
+ * @param certfile certfile
+ * @param keyfile key file
+ * @param cbh callback handler
+ * @param defaultfile default file
+ */
+ public LazyKeyManager(String certfile, String keyfile, CallbackHandler cbh, boolean defaultfile) {
+ this.certfile = certfile;
+ this.keyfile = keyfile;
+ this.cbh = cbh;
+ this.defaultfile = defaultfile;
+ }
+
+ /**
+ * getCertificateChain and getPrivateKey cannot throw exceptions, therefore any exception is stored
+ * in {@link #error} and can be raised by this method.
+ *
+ * @throws PSQLException if any exception is stored in {@link #error} and can be raised
+ */
+ public void throwKeyManagerException() throws PSQLException {
+ if (error != null) {
+ throw error;
+ }
+ }
+
+ @Override
+ public String chooseClientAlias(String[] keyType, Principal[] issuers, Socket socket) {
+ if (certfile == null) {
+ return null;
+ } else {
+ if (issuers == null || issuers.length == 0) {
+ // Postgres 8.4 and earlier do not send the list of accepted certificate authorities
+ // to the client. See BUG #5468. We can only hope that our certificate will be accepted.
+ return "user";
+ } else {
+ // Sending a wrong certificate causes the connection to be rejected, even if clientcert=0
+ // in pg_hba.conf. Therefore we only send our certificate if the issuer is listed in
+ // issuers.
+ X509Certificate[] certchain = getCertificateChain("user");
+ if (certchain == null) {
+ return null;
+ } else {
+ X509Certificate cert = certchain[certchain.length - 1];
+ X500Principal ourissuer = cert.getIssuerX500Principal();
+ String certKeyType = cert.getPublicKey().getAlgorithm();
+ boolean keyTypeFound = false;
+ boolean found = false;
+ if (keyType != null && keyType.length > 0) {
+ for (String kt : keyType) {
+ if (kt.equalsIgnoreCase(certKeyType)) {
+ keyTypeFound = true;
+ }
+ }
+ } else {
+ // If no key types were passed in, assume we don't care
+ // about checking that the cert uses a particular key type.
+ keyTypeFound = true;
+ }
+ if (keyTypeFound) {
+ for (Principal issuer : issuers) {
+ if (ourissuer.equals(issuer)) {
+ found = keyTypeFound;
+ }
+ }
+ }
+ return found ? "user" : null;
+ }
+ }
+ }
+ }
+
+ @Override
+ public String chooseServerAlias(String keyType, Principal[] issuers, Socket socket) {
+ return null; // We are not a server
+ }
+
+ @Override
+ public X509Certificate[] getCertificateChain(String alias) {
+ if (cert == null && certfile != null) {
+ // If certfile is null, we do not load the certificate
+ // The certificate must be loaded
+ CertificateFactory cf;
+ try {
+ cf = CertificateFactory.getInstance("X.509");
+ } catch (CertificateException ex) {
+ // For some strange reason it throws CertificateException instead of
+ // NoSuchAlgorithmException...
+ error = new PSQLException(GT.tr(
+ "Could not find a java cryptographic algorithm: X.509 CertificateFactory not available."),
+ PSQLState.CONNECTION_FAILURE, ex);
+ return null;
+ }
+ Collection<? extends Certificate> certs;
+ FileInputStream certfileStream = null;
+ try {
+ certfileStream = new FileInputStream(certfile);
+ certs = cf.generateCertificates(certfileStream);
+ } catch (FileNotFoundException ioex) {
+ if (!defaultfile) { // It is not an error if there is no file at the default location
+ error = new PSQLException(
+ GT.tr("Could not open SSL certificate file {0}.", certfile),
+ PSQLState.CONNECTION_FAILURE, ioex);
+ }
+ return null;
+ } catch (CertificateException gsex) {
+ error = new PSQLException(GT.tr("Loading the SSL certificate {0} into a KeyManager failed.",
+ certfile), PSQLState.CONNECTION_FAILURE, gsex);
+ return null;
+ } finally {
+ if (certfileStream != null) {
+ try {
+ certfileStream.close();
+ } catch (IOException ioex) {
+ if (!defaultfile) { // It is not an error if there is no file at the default location
+ error = new PSQLException(
+ GT.tr("Could not close SSL certificate file {0}.", certfile),
+ PSQLState.CONNECTION_FAILURE, ioex);
+ }
+ }
+ }
+ }
+ cert = certs.toArray(new X509Certificate[0]);
+ }
+ return cert;
+ }
+
+ @Override
+ public String[] getClientAliases(String keyType, Principal[] issuers) {
+ String alias = chooseClientAlias(new String[]{keyType}, issuers, (Socket) null);
+ return alias == null ? new String[]{} : new String[]{alias};
+ }
+
+ private static byte[] readFileFully(String path) throws IOException {
+ RandomAccessFile raf = new RandomAccessFile(path, "r");
+ try {
+ byte[] ret = new byte[(int) raf.length()];
+ raf.readFully(ret);
+ return ret;
+ } finally {
+ raf.close();
+ }
+ }
+
+ @Override
+ public PrivateKey getPrivateKey(String alias) {
+ try {
+ if (key == null && keyfile != null) {
+ // If keyfile is null, we do not load the key
+ // The private key must be loaded
+ X509Certificate[] cert = getCertificateChain("user");
+ if (cert == null || cert.length == 0) { // We need the certificate for the algorithm
+ return null;
+ }
+
+ byte[] keydata;
+ try {
+ keydata = readFileFully(keyfile);
+ } catch (FileNotFoundException ex) {
+ if (!defaultfile) {
+ // It is not an error if there is no file at the default location
+ throw ex;
+ }
+ return null;
+ }
+
+ KeyFactory kf = KeyFactory.getInstance(cert[0].getPublicKey().getAlgorithm());
+ try {
+ KeySpec pkcs8KeySpec = new PKCS8EncodedKeySpec(keydata);
+ key = kf.generatePrivate(pkcs8KeySpec);
+ } catch (InvalidKeySpecException ex) {
+ // The key might be password protected
+ EncryptedPrivateKeyInfo ePKInfo = new EncryptedPrivateKeyInfo(keydata);
+ Cipher cipher;
+ try {
+ cipher = Cipher.getInstance(ePKInfo.getAlgName());
+ } catch (NoSuchPaddingException npex) {
+ // Why is it not a subclass of NoSuchAlgorithmException?
+ throw new NoSuchAlgorithmException(npex.getMessage(), npex);
+ }
+ // We call back for the password
+ PasswordCallback pwdcb = new PasswordCallback(GT.tr("Enter SSL password: "), false);
+ try {
+ cbh.handle(new Callback[]{pwdcb});
+ } catch (UnsupportedCallbackException ucex) {
+ if ((cbh instanceof LibPQFactory.ConsoleCallbackHandler)
+ && ("Console is not available".equals(ucex.getMessage()))) {
+ error = new PSQLException(GT
+ .tr("Could not read password for SSL key file, console is not available."),
+ PSQLState.CONNECTION_FAILURE, ucex);
+ } else {
+ error =
+ new PSQLException(
+ GT.tr("Could not read password for SSL key file by callbackhandler {0}.",
+ cbh.getClass().getName()),
+ PSQLState.CONNECTION_FAILURE, ucex);
+ }
+ return null;
+ }
+ try {
+ PBEKeySpec pbeKeySpec = new PBEKeySpec(pwdcb.getPassword());
+ pwdcb.clearPassword();
+ // Now create the Key from the PBEKeySpec
+ SecretKeyFactory skFac = SecretKeyFactory.getInstance(ePKInfo.getAlgName());
+ Key pbeKey = skFac.generateSecret(pbeKeySpec);
+ // Extract the iteration count and the salt
+ AlgorithmParameters algParams = ePKInfo.getAlgParameters();
+ cipher.init(Cipher.DECRYPT_MODE, pbeKey, algParams);
+ // Decrypt the encrypted private key into a PKCS8EncodedKeySpec
+ KeySpec pkcs8KeySpec = ePKInfo.getKeySpec(cipher);
+ key = kf.generatePrivate(pkcs8KeySpec);
+ } catch (GeneralSecurityException ikex) {
+ error = new PSQLException(
+ GT.tr("Could not decrypt SSL key file {0}.", keyfile),
+ PSQLState.CONNECTION_FAILURE, ikex);
+ return null;
+ }
+ }
+ }
+ } catch (IOException ioex) {
+ error = new PSQLException(GT.tr("Could not read SSL key file {0}.", keyfile),
+ PSQLState.CONNECTION_FAILURE, ioex);
+ } catch (NoSuchAlgorithmException ex) {
+ error = new PSQLException(GT.tr("Could not find a java cryptographic algorithm: {0}.",
+ ex.getMessage()), PSQLState.CONNECTION_FAILURE, ex);
+ return null;
+ }
+
+ return key;
+ }
+
+ @Override
+ public String[] getServerAliases(String keyType, Principal[] issuers) {
+ return new String[]{};
+ }
+}
diff --git a/pgjdbc/src/main/java/org/postgresql/ssl/LibPQFactory.java b/pgjdbc/src/main/java/org/postgresql/ssl/LibPQFactory.java
new file mode 100644
index 0000000..9677d59
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/ssl/LibPQFactory.java
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2004, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.ssl;
+
+import org.postgresql.PGProperty;
+import org.postgresql.jdbc.SslMode;
+import org.postgresql.ssl.NonValidatingFactory.NonValidatingTM;
+import org.postgresql.util.GT;
+import org.postgresql.util.ObjectFactory;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
+
+import java.io.Console;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.security.GeneralSecurityException;
+import java.security.KeyManagementException;
+import java.security.KeyStore;
+import java.security.KeyStoreException;
+import java.security.NoSuchAlgorithmException;
+import java.security.cert.Certificate;
+import java.security.cert.CertificateFactory;
+import java.util.Locale;
+import java.util.Properties;
+
+import javax.net.ssl.KeyManager;
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.TrustManager;
+import javax.net.ssl.TrustManagerFactory;
+import javax.security.auth.callback.Callback;
+import javax.security.auth.callback.CallbackHandler;
+import javax.security.auth.callback.PasswordCallback;
+import javax.security.auth.callback.UnsupportedCallbackException;
+
+/**
+ * Provide an SSLSocketFactory that is compatible with the libpq behaviour.
+ */
+public class LibPQFactory extends WrappedFactory {
+
+ KeyManager km;
+ boolean defaultfile;
+
+ private CallbackHandler getCallbackHandler(Properties info) throws PSQLException {
+ // Determine the callback handler
+ CallbackHandler cbh;
+ String sslpasswordcallback = PGProperty.SSL_PASSWORD_CALLBACK.getOrDefault(info);
+ if (sslpasswordcallback != null) {
+ try {
+ cbh = ObjectFactory.instantiate(CallbackHandler.class, sslpasswordcallback, info, false, null);
+ } catch (Exception e) {
+ throw new PSQLException(
+ GT.tr("The password callback class provided {0} could not be instantiated.",
+ sslpasswordcallback),
+ PSQLState.CONNECTION_FAILURE, e);
+ }
+ } else {
+ cbh = new ConsoleCallbackHandler(PGProperty.SSL_PASSWORD.getOrDefault(info));
+ }
+ return cbh;
+ }
+
+ private void initPk8(String sslkeyfile, String defaultdir, Properties info) throws PSQLException {
+
+ // Load the client's certificate and key
+ String sslcertfile = PGProperty.SSL_CERT.getOrDefault(info);
+ if (sslcertfile == null) { // Fall back to default
+ defaultfile = true;
+ sslcertfile = defaultdir + "postgresql.crt";
+ }
+
+ // If the properties are empty, give null to prevent client key selection
+ km = new LazyKeyManager(("".equals(sslcertfile) ? null : sslcertfile),
+ ("".equals(sslkeyfile) ? null : sslkeyfile), getCallbackHandler(info), defaultfile);
+ }
+
+ private void initP12(String sslkeyfile, Properties info) throws PSQLException {
+ km = new PKCS12KeyManager(sslkeyfile, getCallbackHandler(info));
+ }
+
+ /**
+ * @param info the connection parameters The following parameters are used:
+ * sslmode,sslcert,sslkey,sslrootcert,sslhostnameverifier,sslpasswordcallback,sslpassword
+ * @throws PSQLException if security error appears when initializing factory
+ */
+ public LibPQFactory(Properties info) throws PSQLException {
+ try {
+ SSLContext ctx = SSLContext.getInstance("TLS"); // or "SSL" ?
+
+ // Determining the default file location
+ String pathsep = System.getProperty("file.separator");
+ String defaultdir;
+
+ if (System.getProperty("os.name").toLowerCase(Locale.ROOT).contains("windows")) { // It is Windows
+ defaultdir = System.getenv("APPDATA") + pathsep + "postgresql" + pathsep;
+ } else {
+ defaultdir = System.getProperty("user.home") + pathsep + ".postgresql" + pathsep;
+ }
+
+ String sslkeyfile = PGProperty.SSL_KEY.getOrDefault(info);
+ if (sslkeyfile == null) { // Fall back to default
+ defaultfile = true;
+ sslkeyfile = defaultdir + "postgresql.pk8";
+ }
+
+ if (sslkeyfile.endsWith(".p12") || sslkeyfile.endsWith(".pfx")) {
+ initP12(sslkeyfile, info);
+ } else {
+ initPk8(sslkeyfile, defaultdir, info);
+ }
+
+ TrustManager[] tm;
+ SslMode sslMode = SslMode.of(info);
+ if (!sslMode.verifyCertificate()) {
+ // server validation is not required
+ tm = new TrustManager[]{new NonValidatingTM()};
+ } else {
+ // Load the server certificate
+
+ TrustManagerFactory tmf = TrustManagerFactory.getInstance("PKIX");
+ KeyStore ks;
+ try {
+ ks = KeyStore.getInstance("jks");
+ } catch (KeyStoreException e) {
+ // this should never happen
+ throw new NoSuchAlgorithmException("jks KeyStore not available");
+ }
+ String sslrootcertfile = PGProperty.SSL_ROOT_CERT.getOrDefault(info);
+ if (sslrootcertfile == null) { // Fall back to default
+ sslrootcertfile = defaultdir + "root.crt";
+ }
+ FileInputStream fis;
+ try {
+ fis = new FileInputStream(sslrootcertfile); // NOSONAR
+ } catch (FileNotFoundException ex) {
+ throw new PSQLException(
+ GT.tr("Could not open SSL root certificate file {0}.", sslrootcertfile),
+ PSQLState.CONNECTION_FAILURE, ex);
+ }
+ try {
+ CertificateFactory cf = CertificateFactory.getInstance("X.509");
+ // Certificate[] certs = cf.generateCertificates(fis).toArray(new Certificate[]{}); //Does
+ // not work in java 1.4
+ Object[] certs = cf.generateCertificates(fis).toArray(new Certificate[]{});
+ ks.load(null, null);
+ for (int i = 0; i < certs.length; i++) {
+ ks.setCertificateEntry("cert" + i, (Certificate) certs[i]);
+ }
+ tmf.init(ks);
+ } catch (IOException ioex) {
+ throw new PSQLException(
+ GT.tr("Could not read SSL root certificate file {0}.", sslrootcertfile),
+ PSQLState.CONNECTION_FAILURE, ioex);
+ } catch (GeneralSecurityException gsex) {
+ throw new PSQLException(
+ GT.tr("Loading the SSL root certificate {0} into a TrustManager failed.",
+ sslrootcertfile),
+ PSQLState.CONNECTION_FAILURE, gsex);
+ } finally {
+ try {
+ fis.close();
+ } catch (IOException e) {
+ /* ignore */
+ }
+ }
+ tm = tmf.getTrustManagers();
+ }
+
+ // finally we can initialize the context
+ try {
+ KeyManager km = this.km;
+ ctx.init(km == null ? null : new KeyManager[]{km}, tm, null);
+ } catch (KeyManagementException ex) {
+ throw new PSQLException(GT.tr("Could not initialize SSL context."),
+ PSQLState.CONNECTION_FAILURE, ex);
+ }
+
+ factory = ctx.getSocketFactory();
+ } catch (NoSuchAlgorithmException ex) {
+ throw new PSQLException(GT.tr("Could not find a java cryptographic algorithm: {0}.",
+ ex.getMessage()), PSQLState.CONNECTION_FAILURE, ex);
+ }
+ }
+
+ /**
+ * Propagates any exception from {@link LazyKeyManager}.
+ *
+ * @throws PSQLException if there is an exception to propagate
+ */
+ public void throwKeyManagerException() throws PSQLException {
+ if (km != null) {
+ if (km instanceof LazyKeyManager) {
+ ((LazyKeyManager) km).throwKeyManagerException();
+ }
+ if (km instanceof PKCS12KeyManager) {
+ ((PKCS12KeyManager) km).throwKeyManagerException();
+ }
+ }
+ }
+
+ /**
+ * A CallbackHandler that reads the password from the console or returns the password given to its
+ * constructor.
+ */
+ public static class ConsoleCallbackHandler implements CallbackHandler {
+
+ private char[] password;
+
+ ConsoleCallbackHandler(String password) {
+ if (password != null) {
+ this.password = password.toCharArray();
+ }
+ }
+
+ /**
+ * Handles the callbacks.
+ *
+ * @param callbacks The callbacks to handle
+ * @throws UnsupportedCallbackException If the console is not available or other than
+ * PasswordCallback is supplied
+ */
+ @Override
+ public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
+ Console cons = System.console();
+ char[] password = this.password;
+ if (cons == null && password == null) {
+ throw new UnsupportedCallbackException(callbacks[0], "Console is not available");
+ }
+ for (Callback callback : callbacks) {
+ if (!(callback instanceof PasswordCallback)) {
+ throw new UnsupportedCallbackException(callback);
+ }
+ PasswordCallback pwdCallback = (PasswordCallback) callback;
+ if (password != null) {
+ pwdCallback.setPassword(password);
+ continue;
+ }
+ // readPassword("%s", prompt) is used instead of readPassword(prompt) because the
+ // prompt may contain '%' characters
+ pwdCallback.setPassword(cons.readPassword("%s", pwdCallback.getPrompt()));
+ }
+ }
+ }
+}
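
Put together, a libpq-style configuration exercising this factory might look like the sketch below. The file paths mirror the ~/.postgresql defaults computed above; credentials, host, and passphrase are assumptions:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Properties;

    public class LibPQStyleSslDemo {
      public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.setProperty("user", "app");            // assumed credentials
        props.setProperty("password", "secret");
        props.setProperty("ssl", "true");
        props.setProperty("sslmode", "verify-full"); // certificate and hostname checks
        props.setProperty("sslrootcert", "/home/app/.postgresql/root.crt");
        props.setProperty("sslcert", "/home/app/.postgresql/postgresql.crt");
        props.setProperty("sslkey", "/home/app/.postgresql/postgresql.pk8"); // DER PKCS#8
        props.setProperty("sslpassword", "key-passphrase"); // answered by ConsoleCallbackHandler
        try (Connection con = DriverManager.getConnection(
            "jdbc:postgresql://db.example.com/app", props)) {
          System.out.println(con.isValid(5));
        }
      }
    }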
diff --git a/pgjdbc/src/main/java/org/postgresql/ssl/MakeSSL.java b/pgjdbc/src/main/java/org/postgresql/ssl/MakeSSL.java
new file mode 100644
index 0000000..b590970
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/ssl/MakeSSL.java
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2004, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.ssl;
+
+import org.postgresql.PGProperty;
+import org.postgresql.core.PGStream;
+import org.postgresql.core.SocketFactoryFactory;
+import org.postgresql.jdbc.SslMode;
+import org.postgresql.util.GT;
+import org.postgresql.util.ObjectFactory;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
+
+import java.io.IOException;
+import java.util.Properties;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import javax.net.ssl.HostnameVerifier;
+import javax.net.ssl.SSLSocket;
+import javax.net.ssl.SSLSocketFactory;
+
+public class MakeSSL extends ObjectFactory {
+
+ private static final Logger LOGGER = Logger.getLogger(MakeSSL.class.getName());
+
+ public static void convert(PGStream stream, Properties info)
+ throws PSQLException, IOException {
+ LOGGER.log(Level.FINE, "converting regular socket connection to ssl");
+
+ SSLSocketFactory factory = SocketFactoryFactory.getSslSocketFactory(info);
+ SSLSocket newConnection;
+ try {
+ newConnection = (SSLSocket) factory.createSocket(stream.getSocket(),
+ stream.getHostSpec().getHost(), stream.getHostSpec().getPort(), true);
+ int connectTimeoutSeconds = PGProperty.CONNECT_TIMEOUT.getInt(info);
+ newConnection.setSoTimeout(connectTimeoutSeconds * 1000);
+ // We must invoke manually, otherwise the exceptions are hidden
+ newConnection.setUseClientMode(true);
+ newConnection.startHandshake();
+ } catch (IOException ex) {
+ throw new PSQLException(GT.tr("SSL error: {0}", ex.getMessage()),
+ PSQLState.CONNECTION_FAILURE, ex);
+ }
+ if (factory instanceof LibPQFactory) { // throw any KeyManager exception
+ ((LibPQFactory) factory).throwKeyManagerException();
+ }
+
+ SslMode sslMode = SslMode.of(info);
+ if (sslMode.verifyPeerName()) {
+ verifyPeerName(stream, info, newConnection);
+ }
+ // Zero timeout (default) means infinite
+ int socketTimeout = PGProperty.SOCKET_TIMEOUT.getInt(info);
+ newConnection.setSoTimeout(socketTimeout * 1000);
+ stream.changeSocket(newConnection);
+ }
+
+ private static void verifyPeerName(PGStream stream, Properties info, SSLSocket newConnection)
+ throws PSQLException {
+ HostnameVerifier hvn;
+ String sslhostnameverifier = PGProperty.SSL_HOSTNAME_VERIFIER.getOrDefault(info);
+ if (sslhostnameverifier == null) {
+ hvn = PGjdbcHostnameVerifier.INSTANCE;
+ sslhostnameverifier = "PgjdbcHostnameVerifier";
+ } else {
+ try {
+ hvn = instantiate(HostnameVerifier.class, sslhostnameverifier, info, false, null);
+ } catch (Exception e) {
+ throw new PSQLException(
+ GT.tr("The HostnameVerifier class provided {0} could not be instantiated.",
+ sslhostnameverifier),
+ PSQLState.CONNECTION_FAILURE, e);
+ }
+ }
+
+ if (hvn.verify(stream.getHostSpec().getHost(), newConnection.getSession())) {
+ return;
+ }
+
+ throw new PSQLException(
+ GT.tr("The hostname {0} could not be verified by hostnameverifier {1}.",
+ stream.getHostSpec().getHost(), sslhostnameverifier),
+ PSQLState.CONNECTION_FAILURE);
+ }
+
+}
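
A custom verifier plugged in through sslhostnameverifier only needs a public no-argument constructor, since MakeSSL instantiates it reflectively. This sketch (hypothetical class name) delegates to the driver's verifier except for a local override; in practice it would be referenced by its fully qualified name:

    import javax.net.ssl.HostnameVerifier;
    import javax.net.ssl.SSLSession;

    import org.postgresql.ssl.PGjdbcHostnameVerifier;

    public class LenientLocalVerifier implements HostnameVerifier {
      @Override
      public boolean verify(String hostname, SSLSession session) {
        // Accept localhost unconditionally; delegate everything else to the driver's verifier
        return "localhost".equals(hostname)
            || PGjdbcHostnameVerifier.INSTANCE.verify(hostname, session);
      }
    }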
diff --git a/pgjdbc/src/main/java/org/postgresql/ssl/NonValidatingFactory.java b/pgjdbc/src/main/java/org/postgresql/ssl/NonValidatingFactory.java
new file mode 100644
index 0000000..649a54d
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/ssl/NonValidatingFactory.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2004, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.ssl;
+
+import java.security.GeneralSecurityException;
+import java.security.cert.X509Certificate;
+
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.TrustManager;
+import javax.net.ssl.X509TrustManager;
+
+/**
+ * Provide a SSLSocketFactory that allows SSL connections to be made without validating the server's
+ * certificate. This is more convenient for some applications, but is less secure as it allows "man
+ * in the middle" attacks.
+ */
+public class NonValidatingFactory extends WrappedFactory {
+
+ /**
+ * We provide a constructor that takes an unused argument solely because the ssl calling code will
+ * look for this constructor first and then fall back to the no argument constructor, so we avoid
+ * an exception and additional reflection lookups.
+ *
+ * @param arg input argument
+ * @throws GeneralSecurityException if something goes wrong
+ */
+ public NonValidatingFactory(String arg) throws GeneralSecurityException {
+ SSLContext ctx = SSLContext.getInstance("TLS"); // or "SSL" ?
+
+ ctx.init(null, new TrustManager[]{new NonValidatingTM()}, null);
+
+ factory = ctx.getSocketFactory();
+ }
+
+ public static class NonValidatingTM implements X509TrustManager {
+
+ @Override
+ public X509Certificate[] getAcceptedIssuers() {
+ return new X509Certificate[0];
+ }
+
+ @Override
+ public void checkClientTrusted(X509Certificate[] certs, String authType) {
+ }
+
+ @Override
+ public void checkServerTrusted(X509Certificate[] certs, String authType) {
+ }
+ }
+}
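
Typical (test-only) usage is via the sslfactory property; everything here except the factory class name is an assumption:

    import java.util.Properties;

    public class NonValidatingDemo {
      static Properties testSslProps() {
        Properties props = new Properties();
        props.setProperty("ssl", "true");
        // Test environments only: the server certificate is accepted without any validation
        props.setProperty("sslfactory", "org.postgresql.ssl.NonValidatingFactory");
        return props;
      }
    }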
diff --git a/pgjdbc/src/main/java/org/postgresql/ssl/PGjdbcHostnameVerifier.java b/pgjdbc/src/main/java/org/postgresql/ssl/PGjdbcHostnameVerifier.java
new file mode 100644
index 0000000..dbd432c
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/ssl/PGjdbcHostnameVerifier.java
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2018, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.ssl;
+
+import org.postgresql.util.GT;
+
+import java.net.IDN;
+import java.security.cert.CertificateParsingException;
+import java.security.cert.X509Certificate;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import javax.naming.InvalidNameException;
+import javax.naming.ldap.LdapName;
+import javax.naming.ldap.Rdn;
+import javax.net.ssl.HostnameVerifier;
+import javax.net.ssl.SSLPeerUnverifiedException;
+import javax.net.ssl.SSLSession;
+import javax.security.auth.x500.X500Principal;
+
+public class PGjdbcHostnameVerifier implements HostnameVerifier {
+ private static final Logger LOGGER = Logger.getLogger(PGjdbcHostnameVerifier.class.getName());
+
+ public static final PGjdbcHostnameVerifier INSTANCE = new PGjdbcHostnameVerifier();
+
+ private static final int TYPE_DNS_NAME = 2;
+ private static final int TYPE_IP_ADDRESS = 7;
+
+ public static final Comparator<String> HOSTNAME_PATTERN_COMPARATOR = new Comparator<String>() {
+ private int countChars(String value, char ch) {
+ int count = 0;
+ int pos = -1;
+ while (true) {
+ pos = value.indexOf(ch, pos + 1);
+ if (pos == -1) {
+ break;
+ }
+ count++;
+ }
+ return count;
+ }
+
+ @Override
+ public int compare(String o1, String o2) {
+ // The more dots, the better: a.b.c.postgresql.org is more specific than postgresql.org
+ int d1 = countChars(o1, '.');
+ int d2 = countChars(o2, '.');
+ if (d1 != d2) {
+ return d1 > d2 ? 1 : -1;
+ }
+
+ // The fewer stars, the better: postgresql.org is more specific than *.*.postgresql.org
+ int s1 = countChars(o1, '*');
+ int s2 = countChars(o2, '*');
+ if (s1 != s2) {
+ return s1 < s2 ? 1 : -1;
+ }
+
+ // The longer the better: postgresql.org is more specific than sql.org
+ int l1 = o1.length();
+ int l2 = o2.length();
+ if (l1 != l2) {
+ return l1 > l2 ? 1 : -1;
+ }
+
+ return 0;
+ }
+ };
+
+ @Override
+ public boolean verify(String hostname, SSLSession session) {
+ X509Certificate[] peerCerts;
+ try {
+ peerCerts = (X509Certificate[]) session.getPeerCertificates();
+ } catch (SSLPeerUnverifiedException e) {
+ LOGGER.log(Level.SEVERE,
+ GT.tr("Unable to parse X509Certificate for hostname {0}", hostname), e);
+ return false;
+ }
+ if (peerCerts == null || peerCerts.length == 0) {
+ LOGGER.log(Level.SEVERE,
+ GT.tr("No certificates found for hostname {0}", hostname));
+ return false;
+ }
+
+ String canonicalHostname;
+ if (hostname.startsWith("[") && hostname.endsWith("]")) {
+ // IPv6 address like [2001:db8:0:1:1:1:1:1]
+ canonicalHostname = hostname.substring(1, hostname.length() - 1);
+ } else {
+ // This converts unicode domain name to ASCII
+ try {
+ canonicalHostname = IDN.toASCII(hostname);
+ if (LOGGER.isLoggable(Level.FINEST)) {
+ LOGGER.log(Level.FINEST, "Canonical host name for {0} is {1}",
+ new Object[]{hostname, canonicalHostname});
+ }
+ } catch (IllegalArgumentException e) {
+ // e.g. hostname is invalid
+ LOGGER.log(Level.SEVERE,
+ GT.tr("Hostname {0} is invalid", hostname), e);
+ return false;
+ }
+ }
+
+ X509Certificate serverCert = peerCerts[0];
+
+ // Check for Subject Alternative Names (see RFC 6125)
+
+ Collection<List<?>> subjectAltNames;
+ try {
+ subjectAltNames = serverCert.getSubjectAlternativeNames();
+ if (subjectAltNames == null) {
+ subjectAltNames = Collections.emptyList();
+ }
+ } catch (CertificateParsingException e) {
+ LOGGER.log(Level.SEVERE,
+ GT.tr("Unable to parse certificates for hostname {0}", hostname), e);
+ return false;
+ }
+
+ boolean anyDnsSan = false;
+ /*
+ * Each item in the SAN collection is a 2-element list.
+ * See {@link X509Certificate#getSubjectAlternativeNames}
+ * The first element in each list is a number indicating the type of entry.
+ */
+ for (List<?> sanItem : subjectAltNames) {
+ if (sanItem.size() != 2) {
+ continue;
+ }
+ Integer sanType = (Integer) sanItem.get(0);
+ if (sanType == null) {
+ // just in case
+ continue;
+ }
+ if (sanType != TYPE_IP_ADDRESS && sanType != TYPE_DNS_NAME) {
+ continue;
+ }
+ String san = (String) sanItem.get(1);
+ if (sanType == TYPE_IP_ADDRESS && san != null && san.startsWith("*")) {
+ // Wildcards should not be present in the IP Address field
+ continue;
+ }
+ anyDnsSan |= sanType == TYPE_DNS_NAME;
+ if (verifyHostName(canonicalHostname, san)) {
+ if (LOGGER.isLoggable(Level.FINEST)) {
+ LOGGER.log(Level.FINEST,
+ GT.tr("Server name validation pass for {0}, subjectAltName {1}", hostname, san));
+ }
+ return true;
+ }
+ }
+
+ if (anyDnsSan) {
+ /*
+ * RFC2818, section 3.1 (I bet you won't recheck :)
+ * If a subjectAltName extension of type dNSName is present, that MUST
+ * be used as the identity. Otherwise, the (most specific) Common Name
+ * field in the Subject field of the certificate MUST be used. Although
+ * the use of the Common Name is existing practice, it is deprecated and
+ * Certification Authorities are encouraged to use the dNSName instead.
+ */
+ LOGGER.log(Level.SEVERE,
+ GT.tr("Server name validation failed: certificate for host {0} dNSName entries subjectAltName,"
+ + " but none of them match. Assuming server name validation failed", hostname));
+ return false;
+ }
+
+ // Last attempt: no DNS Subject Alternative Name entries detected, try common name
+ LdapName dn;
+ try {
+ dn = new LdapName(serverCert.getSubjectX500Principal().getName(X500Principal.RFC2253));
+ } catch (InvalidNameException e) {
+ LOGGER.log(Level.SEVERE,
+ GT.tr("Server name validation failed: unable to extract common name"
+ + " from X509Certificate for hostname {0}", hostname), e);
+ return false;
+ }
+
+ List<String> commonNames = new ArrayList<>(1);
+ for (Rdn rdn : dn.getRdns()) {
+ if ("CN".equals(rdn.getType())) {
+ commonNames.add((String) rdn.getValue());
+ }
+ }
+ if (commonNames.isEmpty()) {
+ LOGGER.log(Level.SEVERE,
+ GT.tr("Server name validation failed: certificate for hostname {0} has no DNS subjectAltNames,"
+ + " and it CommonName is missing as well",
+ hostname));
+ return false;
+ }
+ if (commonNames.size() > 1) {
+ /*
+ * RFC2818, section 3.1
+ * If a subjectAltName extension of type dNSName is present, that MUST
+ * be used as the identity. Otherwise, the (most specific) Common Name
+ * field in the Subject field of the certificate MUST be used
+ *
+ * The sort is from less specific to most specific.
+ */
+ Collections.sort(commonNames, HOSTNAME_PATTERN_COMPARATOR);
+ }
+ String commonName = commonNames.get(commonNames.size() - 1);
+ boolean result = verifyHostName(canonicalHostname, commonName);
+ if (!result) {
+ LOGGER.log(Level.SEVERE,
+ GT.tr("Server name validation failed: hostname {0} does not match common name {1}",
+ hostname, commonName));
+ }
+ return result;
+ }
+
+ public boolean verifyHostName(String hostname, String pattern) {
+ if (hostname == null || pattern == null) {
+ return false;
+ }
+ int lastStar = pattern.lastIndexOf('*');
+ if (lastStar == -1) {
+ // No wildcard => just compare hostnames
+ return hostname.equalsIgnoreCase(pattern);
+ }
+ if (lastStar > 0) {
+ // Wildcards like foo*.com are not supported yet
+ return false;
+ }
+ if (pattern.indexOf('.') == -1) {
+ // Wildcard certificates should contain at least one dot
+ return false;
+ }
+ // pattern starts with *, so hostname should be at least (pattern.length-1) long
+ if (hostname.length() < pattern.length() - 1) {
+ return false;
+ }
+ // Use case insensitive comparison
+ final boolean ignoreCase = true;
+ // Below code is "hostname.endsWithIgnoreCase(pattern.withoutFirstStar())"
+
+ // E.g. hostname==sub.host.com; pattern==*.host.com
+ // We need the offset at which ".host.com" starts within hostname
+ // For this we take hostname.length() - pattern.length()
+ // and +1 is required since pattern is known to start with *
+ int toffset = hostname.length() - pattern.length() + 1;
+
+ // Wildcard covers just one domain level
+ // a.b.c.com should not be covered by *.c.com
+ if (hostname.lastIndexOf('.', toffset - 1) >= 0) {
+ // If there's a dot in between 0..toffset
+ return false;
+ }
+
+ return hostname.regionMatches(ignoreCase, toffset,
+ pattern, 1, pattern.length() - 1);
+ }
+
+}
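
The wildcard rules in verifyHostName can be illustrated with a small sketch; the hostnames are arbitrary examples and the expected results follow directly from the checks above:

    import org.postgresql.ssl.PGjdbcHostnameVerifier;

    public class WildcardDemo {
      public static void main(String[] args) {
        PGjdbcHostnameVerifier v = PGjdbcHostnameVerifier.INSTANCE;
        System.out.println(v.verifyHostName("api.example.com", "*.example.com")); // true
        System.out.println(v.verifyHostName("a.b.example.com", "*.example.com")); // false: one label only
        System.out.println(v.verifyHostName("example.com", "*.example.com"));     // false: too short
        System.out.println(v.verifyHostName("EXAMPLE.COM", "example.com"));       // true: case-insensitive
      }
    }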
diff --git a/pgjdbc/src/main/java/org/postgresql/ssl/PKCS12KeyManager.java b/pgjdbc/src/main/java/org/postgresql/ssl/PKCS12KeyManager.java
new file mode 100644
index 0000000..4f12420
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/ssl/PKCS12KeyManager.java
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2019, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.ssl;
+
+import org.postgresql.jdbc.ResourceLock;
+import org.postgresql.util.GT;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
+
+import java.io.FileInputStream;
+import java.net.Socket;
+import java.security.KeyStore;
+import java.security.KeyStoreException;
+import java.security.Principal;
+import java.security.PrivateKey;
+import java.security.cert.Certificate;
+import java.security.cert.X509Certificate;
+
+import javax.net.ssl.X509KeyManager;
+import javax.security.auth.callback.Callback;
+import javax.security.auth.callback.CallbackHandler;
+import javax.security.auth.callback.PasswordCallback;
+import javax.security.auth.callback.UnsupportedCallbackException;
+import javax.security.auth.x500.X500Principal;
+
+public class PKCS12KeyManager implements X509KeyManager {
+
+ private final CallbackHandler cbh;
+ private PSQLException error;
+ private final String keyfile;
+ private final KeyStore keyStore;
+ boolean keystoreLoaded;
+ private final ResourceLock lock = new ResourceLock();
+
+ public PKCS12KeyManager(String pkcsFile, CallbackHandler cbh) throws PSQLException {
+ try {
+ keyStore = KeyStore.getInstance("pkcs12");
+ keyfile = pkcsFile;
+ this.cbh = cbh;
+ } catch (KeyStoreException kse) {
+ throw new PSQLException(GT.tr(
+ "Unable to find pkcs12 keystore."),
+ PSQLState.CONNECTION_FAILURE, kse);
+ }
+ }
+
+ /**
+ * getCertificateChain and getPrivateKey cannot throw exceptions, therefore any exception is stored
+ * in {@link #error} and can be raised by this method.
+ *
+ * @throws PSQLException if any exception is stored in {@link #error} and can be raised
+ */
+ public void throwKeyManagerException() throws PSQLException {
+ if (error != null) {
+ throw error;
+ }
+ }
+
+ @Override
+ public String[] getClientAliases(String keyType, Principal[] principals) {
+ String alias = chooseClientAlias(new String[]{keyType}, principals, (Socket) null);
+ return alias == null ? null : new String[]{alias};
+ }
+
+ @Override
+ public String chooseClientAlias(String[] keyType, Principal[] principals, Socket socket) {
+ if (principals == null || principals.length == 0) {
+ // Postgres 8.4 and earlier do not send the list of accepted certificate authorities
+ // to the client. See BUG #5468. We can only hope that our certificate will be accepted.
+ return "user";
+ } else {
+ // Sending a wrong certificate causes the connection to be rejected, even if clientcert=0
+ // in pg_hba.conf. Therefore we only send our certificate if the issuer is listed in
+ // issuers.
+ X509Certificate[] certchain = getCertificateChain("user");
+ if (certchain == null) {
+ return null;
+ } else {
+ X509Certificate cert = certchain[certchain.length - 1];
+ X500Principal ourissuer = cert.getIssuerX500Principal();
+ String certKeyType = cert.getPublicKey().getAlgorithm();
+ boolean keyTypeFound = false;
+ boolean found = false;
+ if (keyType != null && keyType.length > 0) {
+ for (String kt : keyType) {
+ if (kt.equalsIgnoreCase(certKeyType)) {
+ keyTypeFound = true;
+ }
+ }
+ } else {
+ // If no key types were passed in, assume we don't care
+ // about checking that the cert uses a particular key type.
+ keyTypeFound = true;
+ }
+ if (keyTypeFound) {
+ for (Principal issuer : principals) {
+ if (ourissuer.equals(issuer)) {
+ found = keyTypeFound;
+ }
+ }
+ }
+ return found ? "user" : null;
+ }
+ }
+ }
+
+ @Override
+ public String[] getServerAliases(String s, Principal[] principals) {
+ return new String[]{};
+ }
+
+ @Override
+ public String chooseServerAlias(String s, Principal[] principals, Socket socket) {
+ // we are not a server
+ return null;
+ }
+
+ @Override
+ public X509Certificate[] getCertificateChain(String alias) {
+ try {
+ loadKeyStore();
+ Certificate[] certs = keyStore.getCertificateChain(alias);
+ if (certs == null) {
+ return null;
+ }
+ X509Certificate[] x509Certificates = new X509Certificate[certs.length];
+ int i = 0;
+ for (Certificate cert : certs) {
+ x509Certificates[i++] = (X509Certificate) cert;
+ }
+ return x509Certificates;
+ } catch (Exception kse) {
+ error = new PSQLException(GT.tr(
+ "Could not find a java cryptographic algorithm: X.509 CertificateFactory not available."),
+ PSQLState.CONNECTION_FAILURE, kse);
+ }
+ return null;
+ }
+
+ @Override
+ public PrivateKey getPrivateKey(String s) {
+ try {
+ loadKeyStore();
+ PasswordCallback pwdcb = new PasswordCallback(GT.tr("Enter SSL password: "), false);
+ cbh.handle(new Callback[]{pwdcb});
+
+ KeyStore.ProtectionParameter protParam = new KeyStore.PasswordProtection(pwdcb.getPassword());
+ KeyStore.PrivateKeyEntry pkEntry =
+ (KeyStore.PrivateKeyEntry) keyStore.getEntry("user", protParam);
+ if (pkEntry == null) {
+ return null;
+ }
+ return pkEntry.getPrivateKey();
+ } catch (Exception ioex) {
+ error = new PSQLException(GT.tr("Could not read SSL key file {0}.", keyfile),
+ PSQLState.CONNECTION_FAILURE, ioex);
+ }
+ return null;
+ }
+
+ @SuppressWarnings("try")
+ private void loadKeyStore() throws Exception {
+ try (ResourceLock ignore = lock.obtain()) {
+ if (keystoreLoaded) {
+ return;
+ }
+ // We call back for the password
+ PasswordCallback pwdcb = new PasswordCallback(GT.tr("Enter SSL password: "), false);
+ try {
+ cbh.handle(new Callback[]{pwdcb});
+ } catch (UnsupportedCallbackException ucex) {
+ if ((cbh instanceof LibPQFactory.ConsoleCallbackHandler)
+ && ("Console is not available".equals(ucex.getMessage()))) {
+ error = new PSQLException(GT
+ .tr("Could not read password for SSL key file, console is not available."),
+ PSQLState.CONNECTION_FAILURE, ucex);
+ } else {
+ error =
+ new PSQLException(
+ GT.tr("Could not read password for SSL key file by callbackhandler {0}.",
+ cbh.getClass().getName()),
+ PSQLState.CONNECTION_FAILURE, ucex);
+ }
+
+ }
+
+ keyStore.load(new FileInputStream(keyfile), pwdcb.getPassword());
+ keystoreLoaded = true;
+ }
+ }
+
+}
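
Because LibPQFactory routes any sslkey ending in .p12 or .pfx to this key manager, using a PKCS#12 bundle requires no custom factory. A property sketch; the path, mode, and passphrase are assumptions:

    import java.util.Properties;

    public class Pkcs12SslDemo {
      static Properties p12SslProps() {
        Properties props = new Properties();
        props.setProperty("ssl", "true");
        props.setProperty("sslmode", "verify-ca");           // assumed mode
        props.setProperty("sslkey", "/home/app/client.p12"); // .p12/.pfx selects PKCS12KeyManager
        props.setProperty("sslpassword", "p12-passphrase");  // answered via the password callback
        return props;
      }
    }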
diff --git a/pgjdbc/src/main/java/org/postgresql/ssl/SingleCertValidatingFactory.java b/pgjdbc/src/main/java/org/postgresql/ssl/SingleCertValidatingFactory.java
new file mode 100644
index 0000000..b42f635
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/ssl/SingleCertValidatingFactory.java
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2004, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.ssl;
+
+import org.postgresql.util.GT;
+
+import java.io.BufferedInputStream;
+import java.io.ByteArrayInputStream;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.charset.StandardCharsets;
+import java.security.GeneralSecurityException;
+import java.security.KeyStore;
+import java.security.cert.CertificateException;
+import java.security.cert.CertificateFactory;
+import java.security.cert.X509Certificate;
+import java.util.UUID;
+
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.TrustManager;
+import javax.net.ssl.TrustManagerFactory;
+import javax.net.ssl.X509TrustManager;
+
+/**
+ *